From 774b827b2e06bf87c30f6227f3637273668fb0b7 Mon Sep 17 00:00:00 2001 From: Travis Glenn Hansen Date: Thu, 13 May 2021 22:00:21 -0600 Subject: [PATCH 01/44] add smb-client driver, share code with nfs-client driver Signed-off-by: Travis Glenn Hansen --- README.md | 3 + examples/smb-client.yaml | 10 + src/driver/controller-client-common/index.js | 672 +++++++++++++++++++ src/driver/controller-nfs-client/index.js | 653 +----------------- src/driver/controller-smb-client/index.js | 31 + src/driver/factory.js | 3 + 6 files changed, 729 insertions(+), 643 deletions(-) create mode 100644 examples/smb-client.yaml create mode 100644 src/driver/controller-client-common/index.js create mode 100644 src/driver/controller-smb-client/index.js diff --git a/README.md b/README.md index 9c6e9d8..47d13fd 100644 --- a/README.md +++ b/README.md @@ -23,6 +23,8 @@ have access to resizing, snapshots, clones, etc functionality. - `zfs-local-ephemeral-inline` (provisions node-local zfs datasets) - `nfs-client` (crudely provisions storage using a shared nfs share/directory for all volumes) + - `smb-client` (crudely provisions storage using a shared smb share/directory + for all volumes) - `node-manual` (allows connecting to manually created smb, nfs, and iscsi volumes, see sample PVs in the `examples` directory) - framework for developing `csi` drivers @@ -172,6 +174,7 @@ non-`root` user when connecting to the FreeNAS server: ``` csi ALL=(ALL) NOPASSWD:ALL ``` + (note this can get reset by FreeNAS if you alter the user via the GUI later) diff --git a/examples/smb-client.yaml b/examples/smb-client.yaml new file mode 100644 index 0000000..f4a6646 --- /dev/null +++ b/examples/smb-client.yaml @@ -0,0 +1,10 @@ +driver: smb-client +instance_id: +smb: + shareHost: server address + shareBasePath: "someshare/path" + # shareHost:shareBasePath should be mounted at this location in the controller container + controllerBasePath: "/storage" + dirPermissionsMode: "0777" + dirPermissionsUser: root + 
dirPermissionsGroup: wheel diff --git a/src/driver/controller-client-common/index.js b/src/driver/controller-client-common/index.js new file mode 100644 index 0000000..64e0596 --- /dev/null +++ b/src/driver/controller-client-common/index.js @@ -0,0 +1,672 @@ +const { CsiBaseDriver } = require("../index"); +const { GrpcError, grpc } = require("../../utils/grpc"); +const cp = require("child_process"); +const { Mount } = require("../../utils/mount"); + +/** + * Crude nfs-client driver which simply creates directories to be mounted + * and uses rsync for cloning/snapshots + */ +class ControllerClientCommonDriver extends CsiBaseDriver { + constructor(ctx, options) { + super(...arguments); + + options = options || {}; + options.service = options.service || {}; + options.service.identity = options.service.identity || {}; + options.service.controller = options.service.controller || {}; + options.service.node = options.service.node || {}; + + options.service.identity.capabilities = + options.service.identity.capabilities || {}; + + options.service.controller.capabilities = + options.service.controller.capabilities || {}; + + options.service.node.capabilities = options.service.node.capabilities || {}; + + if (!("service" in options.service.identity.capabilities)) { + this.ctx.logger.debug("setting default identity service caps"); + + options.service.identity.capabilities.service = [ + //"UNKNOWN", + "CONTROLLER_SERVICE", + //"VOLUME_ACCESSIBILITY_CONSTRAINTS" + ]; + } + + if (!("volume_expansion" in options.service.identity.capabilities)) { + this.ctx.logger.debug("setting default identity volume_expansion caps"); + + options.service.identity.capabilities.volume_expansion = [ + //"UNKNOWN", + "ONLINE", + //"OFFLINE" + ]; + } + + if (!("rpc" in options.service.controller.capabilities)) { + this.ctx.logger.debug("setting default controller caps"); + + options.service.controller.capabilities.rpc = [ + //"UNKNOWN", + "CREATE_DELETE_VOLUME", + //"PUBLISH_UNPUBLISH_VOLUME", + 
//"LIST_VOLUMES", + //"GET_CAPACITY", + "CREATE_DELETE_SNAPSHOT", + //"LIST_SNAPSHOTS", + "CLONE_VOLUME", + //"PUBLISH_READONLY", + //"EXPAND_VOLUME", + ]; + } + + if (!("rpc" in options.service.node.capabilities)) { + this.ctx.logger.debug("setting default node caps"); + + options.service.node.capabilities.rpc = [ + //"UNKNOWN", + "STAGE_UNSTAGE_VOLUME", + "GET_VOLUME_STATS", + //"EXPAND_VOLUME" + ]; + } + } + + assertCapabilities(capabilities) { + const driver = this; + this.ctx.logger.verbose("validating capabilities: %j", capabilities); + + let message = null; + let fs_types = driver.getFsTypes(); + //[{"access_mode":{"mode":"SINGLE_NODE_WRITER"},"mount":{"mount_flags":["noatime","_netdev"],"fs_type":"nfs"},"access_type":"mount"}] + const valid = capabilities.every((capability) => { + if (capability.access_type != "mount") { + message = `invalid access_type ${capability.access_type}`; + return false; + } + + if ( + capability.mount.fs_type && + !fs_types.includes(capability.mount.fs_type) + ) { + message = `invalid fs_type ${capability.mount.fs_type}`; + return false; + } + + if ( + ![ + "UNKNOWN", + "SINGLE_NODE_WRITER", + "SINGLE_NODE_READER_ONLY", + "MULTI_NODE_READER_ONLY", + "MULTI_NODE_SINGLE_WRITER", + "MULTI_NODE_MULTI_WRITER", + ].includes(capability.access_mode.mode) + ) { + message = `invalid access_mode, ${capability.access_mode.mode}`; + return false; + } + + return true; + }); + + return { valid, message }; + } + // share paths + getShareBasePath() { + let config_key = this.getConfigKey(); + let path = this.options[config_key].shareBasePath; + if (!path) { + throw new GrpcError( + grpc.status.FAILED_PRECONDITION, + `invalid configuration: missing shareBasePath` + ); + } + + path = path.replace(/\/$/, ""); + if (!path) { + throw new GrpcError( + grpc.status.FAILED_PRECONDITION, + `invalid configuration: missing shareBasePath` + ); + } + + return path; + } + + // controller paths + getControllerBasePath() { + let config_key = this.getConfigKey(); + 
let path = this.options[config_key].controllerBasePath; + if (!path) { + throw new GrpcError( + grpc.status.FAILED_PRECONDITION, + `invalid configuration: missing controllerBasePath` + ); + } + + path = path.replace(/\/$/, ""); + if (!path) { + throw new GrpcError( + grpc.status.FAILED_PRECONDITION, + `invalid configuration: missing controllerBasePath` + ); + } + + return path; + } + + // path helpers + getVolumeExtraPath() { + return "/v"; + } + + getSnapshotExtraPath() { + return "/s"; + } + + getShareVolumeBasePath() { + return this.getShareBasePath() + this.getVolumeExtraPath(); + } + + getShareSnapshotBasePath() { + return this.getShareBasePath() + this.getSnapshotExtraPath(); + } + + getShareVolumePath(volume_id) { + return this.getShareVolumeBasePath() + "/" + volume_id; + } + + getShareSnapshotPath(snapshot_id) { + return this.getShareSnapshotBasePath() + "/" + snapshot_id; + } + + getControllerVolumeBasePath() { + return this.getControllerBasePath() + this.getVolumeExtraPath(); + } + + getControllerSnapshotBasePath() { + return this.getControllerBasePath() + this.getSnapshotExtraPath(); + } + + getControllerVolumePath(volume_id) { + return this.getControllerVolumeBasePath() + "/" + volume_id; + } + + getControllerSnapshotPath(snapshot_id) { + return this.getControllerSnapshotBasePath() + "/" + snapshot_id; + } + + exec(command, args, options = {}) { + args = args || []; + + let timeout; + let stdout = ""; + let stderr = ""; + + if (options.sudo) { + args.unshift(command); + command = "sudo"; + } + console.log("executing command: %s %s", command, args.join(" ")); + const child = cp.spawn(command, args, options); + + let didTimeout = false; + if (options && options.timeout) { + timeout = setTimeout(() => { + didTimeout = true; + child.kill(options.killSignal || "SIGTERM"); + }, options.timeout); + } + + return new Promise((resolve, reject) => { + child.stdout.on("data", function (data) { + stdout = stdout + data; + }); + + child.stderr.on("data", function 
(data) { + stderr = stderr + data; + }); + + child.on("close", function (code) { + const result = { code, stdout, stderr }; + if (timeout) { + clearTimeout(timeout); + } + if (code) { + reject(result); + } else { + resolve(result); + } + }); + }); + } + + stripTrailingSlash(s) { + if (s.length > 1) { + return s.replace(/\/$/, ""); + } + + return s; + } + + stripLeadingSlash(s) { + if (s.length > 1) { + return s.replace(/^\/+/, ""); + } + + return s; + } + + async cloneDir(source_path, target_path) { + await this.exec("mkdir", ["-p", target_path]); + + /** + * trailing / is important + * rsync -a /mnt/storage/s/foo/ /mnt/storage/v/PVC-111/ + */ + await this.exec("rsync", [ + "-a", + this.stripTrailingSlash(source_path) + "/", + this.stripTrailingSlash(target_path) + "/", + ]); + } + + async getAvailableSpaceAtPath(path) { + //df --output=avail /mnt/storage/ + // Avail + //1481334328 + + const response = await this.exec("df", ["--output=avail", path]); + + return response.stdout.split("\n")[1].trim(); + } + + async deleteDir(path) { + await this.exec("rm", ["-rf", path]); + + return; + + /** + * trailing / is important + * rsync -a /mnt/storage/s/foo/ /mnt/storage/v/PVC-111/ + */ + await this.exec("rsync", [ + "-a", + "--delete", + this.stripTrailingSlash(empty_path) + "/", + this.stripTrailingSlash(path) + "/", + ]); + } + + /** + * Create a volume doing in essence the following: + * 1. create directory + * + * Should return 2 parameters + * 1. `server` - host/ip of the nfs server + * 2. 
`share` - path of the mount shared + * + * @param {*} call + */ + async CreateVolume(call) { + const driver = this; + + let config_key = this.getConfigKey(); + let name = call.request.name; + let volume_content_source = call.request.volume_content_source; + + if (!name) { + throw new GrpcError( + grpc.status.INVALID_ARGUMENT, + `volume name is required` + ); + } + + if (call.request.volume_capabilities) { + const result = this.assertCapabilities(call.request.volume_capabilities); + if (result.valid !== true) { + throw new GrpcError(grpc.status.INVALID_ARGUMENT, result.message); + } + } + + if ( + call.request.capacity_range.required_bytes > 0 && + call.request.capacity_range.limit_bytes > 0 && + call.request.capacity_range.required_bytes > + call.request.capacity_range.limit_bytes + ) { + throw new GrpcError( + grpc.status.OUT_OF_RANGE, + `required_bytes is greather than limit_bytes` + ); + } + + let capacity_bytes = + call.request.capacity_range.required_bytes || + call.request.capacity_range.limit_bytes; + + if (!capacity_bytes) { + //should never happen, value must be set + throw new GrpcError( + grpc.status.INVALID_ARGUMENT, + `volume capacity is required (either required_bytes or limit_bytes)` + ); + } + + // ensure *actual* capacity is not greater than limit + if ( + call.request.capacity_range.limit_bytes && + call.request.capacity_range.limit_bytes > 0 && + capacity_bytes > call.request.capacity_range.limit_bytes + ) { + throw new GrpcError( + grpc.status.OUT_OF_RANGE, + `required volume capacity is greater than limit` + ); + } + + const volume_path = driver.getControllerVolumePath(name); + + let response; + let source_path; + //let volume_content_source_snapshot_id; + //let volume_content_source_volume_id; + + // create target dir + response = await driver.exec("mkdir", ["-p", volume_path]); + + // create dataset + if (volume_content_source) { + switch (volume_content_source.type) { + // must be available when adverstising CREATE_DELETE_SNAPSHOT + // 
simply clone + case "snapshot": + source_path = driver.getControllerSnapshotPath( + volume_content_source.snapshot.snapshot_id + ); + break; + // must be available when adverstising CLONE_VOLUME + // create snapshot first, then clone + case "volume": + source_path = driver.getControllerVolumePath( + volume_content_source.volume.volume_id + ); + break; + default: + throw new GrpcError( + grpc.status.INVALID_ARGUMENT, + `invalid volume_content_source type: ${volume_content_source.type}` + ); + break; + } + + driver.ctx.logger.debug("controller source path: %s", source_path); + response = await driver.cloneDir(source_path, volume_path); + } + + // set mode + if (this.options[config_key].dirPermissionsMode) { + driver.ctx.logger.verbose( + "setting dir mode to: %s on dir: %s", + this.options[config_key].dirPermissionsMode, + volume_path + ); + response = await driver.exec("chmod", [ + this.options[config_key].dirPermissionsMode, + volume_path, + ]); + } + + // set ownership + if ( + this.options[config_key].dirPermissionsUser || + this.options[config_key].dirPermissionsGroup + ) { + driver.ctx.logger.verbose( + "setting ownership to: %s:%s on dir: %s", + this.options[config_key].dirPermissionsUser, + this.options[config_key].dirPermissionsGroup, + volume_path + ); + response = await driver.exec("chown", [ + (this.options[config_key].dirPermissionsUser + ? this.options[config_key].dirPermissionsUser + : "") + + ":" + + (this.options[config_key].dirPermissionsGroup + ? 
this.options[config_key].dirPermissionsGroup + : ""), + volume_path, + ]); + } + + let volume_context = driver.getVolumeContext(name); + + volume_context["provisioner_driver"] = driver.options.driver; + if (driver.options.instance_id) { + volume_context["provisioner_driver_instance_id"] = + driver.options.instance_id; + } + + const res = { + volume: { + volume_id: name, + //capacity_bytes: capacity_bytes, // kubernetes currently pukes if capacity is returned as 0 + capacity_bytes: 0, + content_source: volume_content_source, + volume_context, + }, + }; + + return res; + } + + /** + * Delete a volume + * + * Deleting a volume consists of the following steps: + * 1. delete directory + * + * @param {*} call + */ + async DeleteVolume(call) { + const driver = this; + + let name = call.request.volume_id; + + if (!name) { + throw new GrpcError( + grpc.status.INVALID_ARGUMENT, + `volume_id is required` + ); + } + + const volume_path = driver.getControllerVolumePath(name); + await driver.deleteDir(volume_path); + + return {}; + } + + /** + * + * @param {*} call + */ + async ControllerExpandVolume(call) { + throw new GrpcError( + grpc.status.UNIMPLEMENTED, + `operation not supported by driver` + ); + } + + /** + * TODO: consider volume_capabilities? + * + * @param {*} call + */ + async GetCapacity(call) { + // really capacity is not used at all with nfs in this fashion, so no reason to enable + // here even though it is technically feasible. 
+ throw new GrpcError( + grpc.status.UNIMPLEMENTED, + `operation not supported by driver` + ); + + const driver = this; + + if (call.request.volume_capabilities) { + const result = this.assertCapabilities(call.request.volume_capabilities); + + if (result.valid !== true) { + return { available_capacity: 0 }; + } + } + + const available_capacity = await driver.getAvailableSpaceAtPath( + driver.getControllerBasePath() + ); + return { available_capacity }; + } + + /** + * + * TODO: check capability to ensure not asking about block volumes + * + * @param {*} call + */ + async ListVolumes(call) { + throw new GrpcError( + grpc.status.UNIMPLEMENTED, + `operation not supported by driver` + ); + } + + /** + * + * @param {*} call + */ + async ListSnapshots(call) { + throw new GrpcError( + grpc.status.UNIMPLEMENTED, + `operation not supported by driver` + ); + } + + /** + * + * @param {*} call + */ + async CreateSnapshot(call) { + const driver = this; + + // both these are required + let source_volume_id = call.request.source_volume_id; + let name = call.request.name; + + if (!source_volume_id) { + throw new GrpcError( + grpc.status.INVALID_ARGUMENT, + `snapshot source_volume_id is required` + ); + } + + if (!name) { + throw new GrpcError( + grpc.status.INVALID_ARGUMENT, + `snapshot name is required` + ); + } + + driver.ctx.logger.verbose("requested snapshot name: %s", name); + + let invalid_chars; + invalid_chars = name.match(/[^a-z0-9_\-:.+]+/gi); + if (invalid_chars) { + invalid_chars = String.prototype.concat( + ...new Set(invalid_chars.join("")) + ); + throw new GrpcError( + grpc.status.INVALID_ARGUMENT, + `snapshot name contains invalid characters: ${invalid_chars}` + ); + } + + // https://stackoverflow.com/questions/32106243/regex-to-remove-all-non-alpha-numeric-and-replace-spaces-with/32106277 + name = name.replace(/[^a-z0-9_\-:.+]+/gi, ""); + + driver.ctx.logger.verbose("cleansed snapshot name: %s", name); + + const snapshot_id = `${source_volume_id}-${name}`; + const 
volume_path = driver.getControllerVolumePath(source_volume_id); + const snapshot_path = driver.getControllerSnapshotPath(snapshot_id); + + await driver.cloneDir(volume_path, snapshot_path); + + return { + snapshot: { + /** + * The purpose of this field is to give CO guidance on how much space + * is needed to create a volume from this snapshot. + */ + size_bytes: 0, + snapshot_id, + source_volume_id: source_volume_id, + //https://github.com/protocolbuffers/protobuf/blob/master/src/google/protobuf/timestamp.proto + creation_time: { + seconds: Math.round(new Date().getTime() / 1000), + nanos: 0, + }, + ready_to_use: true, + }, + }; + } + + /** + * In addition, if clones have been created from a snapshot, then they must + * be destroyed before the snapshot can be destroyed. + * + * @param {*} call + */ + async DeleteSnapshot(call) { + const driver = this; + + const snapshot_id = call.request.snapshot_id; + + if (!snapshot_id) { + throw new GrpcError( + grpc.status.INVALID_ARGUMENT, + `snapshot_id is required` + ); + } + + const snapshot_path = driver.getControllerSnapshotPath(snapshot_id); + await driver.deleteDir(snapshot_path); + + return {}; + } + + /** + * + * @param {*} call + */ + async ValidateVolumeCapabilities(call) { + const driver = this; + const result = this.assertCapabilities(call.request.volume_capabilities); + + if (result.valid !== true) { + return { message: result.message }; + } + + return { + confirmed: { + volume_context: call.request.volume_context, + volume_capabilities: call.request.volume_capabilities, // TODO: this is a bit crude, should return *ALL* capabilities, not just what was requested + parameters: call.request.parameters, + }, + }; + } +} + +module.exports.ControllerClientCommonDriver = ControllerClientCommonDriver; diff --git a/src/driver/controller-nfs-client/index.js b/src/driver/controller-nfs-client/index.js index ebe68b8..eb17efb 100644 --- a/src/driver/controller-nfs-client/index.js +++ 
b/src/driver/controller-nfs-client/index.js @@ -1,663 +1,30 @@ -const { CsiBaseDriver } = require("../index"); -const { GrpcError, grpc } = require("../../utils/grpc"); -const cp = require("child_process"); -const { Mount } = require("../../utils/mount"); +const { ControllerClientCommonDriver } = require("../controller-client-common"); /** * Crude nfs-client driver which simply creates directories to be mounted * and uses rsync for cloning/snapshots */ -class ControllerNfsClientDriver extends CsiBaseDriver { +class ControllerNfsClientDriver extends ControllerClientCommonDriver { constructor(ctx, options) { super(...arguments); - - options = options || {}; - options.service = options.service || {}; - options.service.identity = options.service.identity || {}; - options.service.controller = options.service.controller || {}; - options.service.node = options.service.node || {}; - - options.service.identity.capabilities = - options.service.identity.capabilities || {}; - - options.service.controller.capabilities = - options.service.controller.capabilities || {}; - - options.service.node.capabilities = options.service.node.capabilities || {}; - - if (!("service" in options.service.identity.capabilities)) { - this.ctx.logger.debug("setting default identity service caps"); - - options.service.identity.capabilities.service = [ - //"UNKNOWN", - "CONTROLLER_SERVICE", - //"VOLUME_ACCESSIBILITY_CONSTRAINTS" - ]; - } - - if (!("volume_expansion" in options.service.identity.capabilities)) { - this.ctx.logger.debug("setting default identity volume_expansion caps"); - - options.service.identity.capabilities.volume_expansion = [ - //"UNKNOWN", - "ONLINE", - //"OFFLINE" - ]; - } - - if (!("rpc" in options.service.controller.capabilities)) { - this.ctx.logger.debug("setting default controller caps"); - - options.service.controller.capabilities.rpc = [ - //"UNKNOWN", - "CREATE_DELETE_VOLUME", - //"PUBLISH_UNPUBLISH_VOLUME", - //"LIST_VOLUMES", - //"GET_CAPACITY", - 
"CREATE_DELETE_SNAPSHOT", - //"LIST_SNAPSHOTS", - "CLONE_VOLUME", - //"PUBLISH_READONLY", - //"EXPAND_VOLUME", - ]; - } - - if (!("rpc" in options.service.node.capabilities)) { - this.ctx.logger.debug("setting default node caps"); - - options.service.node.capabilities.rpc = [ - //"UNKNOWN", - "STAGE_UNSTAGE_VOLUME", - "GET_VOLUME_STATS", - //"EXPAND_VOLUME" - ]; - } } - assertCapabilities(capabilities) { - this.ctx.logger.verbose("validating capabilities: %j", capabilities); - - let message = null; - //[{"access_mode":{"mode":"SINGLE_NODE_WRITER"},"mount":{"mount_flags":["noatime","_netdev"],"fs_type":"nfs"},"access_type":"mount"}] - const valid = capabilities.every((capability) => { - if (capability.access_type != "mount") { - message = `invalid access_type ${capability.access_type}`; - return false; - } - - if ( - capability.mount.fs_type && - !["nfs"].includes(capability.mount.fs_type) - ) { - message = `invalid fs_type ${capability.mount.fs_type}`; - return false; - } - - if ( - ![ - "UNKNOWN", - "SINGLE_NODE_WRITER", - "SINGLE_NODE_READER_ONLY", - "MULTI_NODE_READER_ONLY", - "MULTI_NODE_SINGLE_WRITER", - "MULTI_NODE_MULTI_WRITER", - ].includes(capability.access_mode.mode) - ) { - message = `invalid access_mode, ${capability.access_mode.mode}`; - return false; - } - - return true; - }); - - return { valid, message }; + getConfigKey() { + return "nfs"; } - // path helpers - getVolumeExtraPath() { - return "/v"; - } - - getSnapshotExtraPath() { - return "/s"; - } - - // share paths - getShareBasePath() { - let path = this.options.nfs.shareBasePath; - if (!path) { - throw new GrpcError( - grpc.status.FAILED_PRECONDITION, - `invalid configuration: missing shareBasePath` - ); - } - - path = path.replace(/\/$/, ""); - if (!path) { - throw new GrpcError( - grpc.status.FAILED_PRECONDITION, - `invalid configuration: missing shareBasePath` - ); - } - - return path; - } - - getShareVolumeBasePath() { - return this.getShareBasePath() + this.getVolumeExtraPath(); - } - - 
getShareSnapshotBasePath() { - return this.getShareBasePath() + this.getSnapshotExtraPath(); - } - - getShareVolumePath(volume_id) { - return this.getShareVolumeBasePath() + "/" + volume_id; - } - - getShareSnapshotPath(snapshot_id) { - return this.getShareSnapshotBasePath() + "/" + snapshot_id; - } - - // controller paths - getControllerBasePath() { - let path = this.options.nfs.controllerBasePath; - if (!path) { - throw new GrpcError( - grpc.status.FAILED_PRECONDITION, - `invalid configuration: missing controllerBasePath` - ); - } - - path = path.replace(/\/$/, ""); - if (!path) { - throw new GrpcError( - grpc.status.FAILED_PRECONDITION, - `invalid configuration: missing controllerBasePath` - ); - } - - return path; - } - - getControllerVolumeBasePath() { - return this.getControllerBasePath() + this.getVolumeExtraPath(); - } - - getControllerSnapshotBasePath() { - return this.getControllerBasePath() + this.getSnapshotExtraPath(); - } - - getControllerVolumePath(volume_id) { - return this.getControllerVolumeBasePath() + "/" + volume_id; - } - - getControllerSnapshotPath(snapshot_id) { - return this.getControllerSnapshotBasePath() + "/" + snapshot_id; - } - - exec(command, args, options = {}) { - args = args || []; - - let timeout; - let stdout = ""; - let stderr = ""; - - if (options.sudo) { - args.unshift(command); - command = "sudo"; - } - console.log("executing command: %s %s", command, args.join(" ")); - const child = cp.spawn(command, args, options); - - let didTimeout = false; - if (options && options.timeout) { - timeout = setTimeout(() => { - didTimeout = true; - child.kill(options.killSignal || "SIGTERM"); - }, options.timeout); - } - - return new Promise((resolve, reject) => { - child.stdout.on("data", function (data) { - stdout = stdout + data; - }); - - child.stderr.on("data", function (data) { - stderr = stderr + data; - }); - - child.on("close", function (code) { - const result = { code, stdout, stderr }; - if (timeout) { - clearTimeout(timeout); - } 
- if (code) { - reject(result); - } else { - resolve(result); - } - }); - }); - } - - stripTrailingSlash(s) { - if (s.length > 1) { - return s.replace(/\/$/, ""); - } - - return s; - } - - async cloneDir(source_path, target_path) { - await this.exec("mkdir", ["-p", target_path]); - - /** - * trailing / is important - * rsync -a /mnt/storage/s/foo/ /mnt/storage/v/PVC-111/ - */ - await this.exec("rsync", [ - "-a", - this.stripTrailingSlash(source_path) + "/", - this.stripTrailingSlash(target_path) + "/", - ]); - } - - async getAvailableSpaceAtPath(path) { - //df --output=avail /mnt/storage/ - // Avail - //1481334328 - - const response = await this.exec("df", ["--output=avail", path]); - - return response.stdout.split("\n")[1].trim(); - } - - async deleteDir(path) { - await this.exec("rm", ["-rf", path]); - - return; - - /** - * trailing / is important - * rsync -a /mnt/storage/s/foo/ /mnt/storage/v/PVC-111/ - */ - await this.exec("rsync", [ - "-a", - "--delete", - this.stripTrailingSlash(empty_path) + "/", - this.stripTrailingSlash(path) + "/", - ]); - } - - /** - * Create a volume doing in essence the following: - * 1. create directory - * - * Should return 2 parameters - * 1. `server` - host/ip of the nfs server - * 2. 
`share` - path of the mount shared - * - * @param {*} call - */ - async CreateVolume(call) { + getVolumeContext(name) { const driver = this; - - let name = call.request.name; - let volume_content_source = call.request.volume_content_source; - - if (!name) { - throw new GrpcError( - grpc.status.INVALID_ARGUMENT, - `volume name is required` - ); - } - - if (call.request.volume_capabilities) { - const result = this.assertCapabilities(call.request.volume_capabilities); - if (result.valid !== true) { - throw new GrpcError(grpc.status.INVALID_ARGUMENT, result.message); - } - } - - if ( - call.request.capacity_range.required_bytes > 0 && - call.request.capacity_range.limit_bytes > 0 && - call.request.capacity_range.required_bytes > - call.request.capacity_range.limit_bytes - ) { - throw new GrpcError( - grpc.status.OUT_OF_RANGE, - `required_bytes is greather than limit_bytes` - ); - } - - let capacity_bytes = - call.request.capacity_range.required_bytes || - call.request.capacity_range.limit_bytes; - - if (!capacity_bytes) { - //should never happen, value must be set - throw new GrpcError( - grpc.status.INVALID_ARGUMENT, - `volume capacity is required (either required_bytes or limit_bytes)` - ); - } - - // ensure *actual* capacity is not greater than limit - if ( - call.request.capacity_range.limit_bytes && - call.request.capacity_range.limit_bytes > 0 && - capacity_bytes > call.request.capacity_range.limit_bytes - ) { - throw new GrpcError( - grpc.status.OUT_OF_RANGE, - `required volume capacity is greater than limit` - ); - } - - const volume_path = driver.getControllerVolumePath(name); - - let response; - let source_path; - //let volume_content_source_snapshot_id; - //let volume_content_source_volume_id; - - // create target dir - response = await driver.exec("mkdir", ["-p", volume_path]); - - // create dataset - if (volume_content_source) { - switch (volume_content_source.type) { - // must be available when adverstising CREATE_DELETE_SNAPSHOT - // simply clone - case 
"snapshot": - source_path = driver.getControllerSnapshotPath( - volume_content_source.snapshot.snapshot_id - ); - break; - // must be available when adverstising CLONE_VOLUME - // create snapshot first, then clone - case "volume": - source_path = driver.getControllerVolumePath( - volume_content_source.volume.volume_id - ); - break; - default: - throw new GrpcError( - grpc.status.INVALID_ARGUMENT, - `invalid volume_content_source type: ${volume_content_source.type}` - ); - break; - } - - driver.ctx.logger.debug("controller source path: %s", source_path); - response = await driver.cloneDir(source_path, volume_path); - } - - // set mode - if (this.options.nfs.dirPermissionsMode) { - driver.ctx.logger.verbose( - "setting dir mode to: %s on dir: %s", - this.options.nfs.dirPermissionsMode, - volume_path - ); - response = await driver.exec("chmod", [ - this.options.nfs.dirPermissionsMode, - volume_path, - ]); - } - - // set ownership - if ( - this.options.nfs.dirPermissionsUser || - this.options.nfs.dirPermissionsGroup - ) { - driver.ctx.logger.verbose( - "setting ownership to: %s:%s on dir: %s", - this.options.nfs.dirPermissionsUser, - this.options.nfs.dirPermissionsGroup, - volume_path - ); - response = await driver.exec("chown", [ - (this.options.nfs.dirPermissionsUser - ? this.options.nfs.dirPermissionsUser - : "") + - ":" + - (this.options.nfs.dirPermissionsGroup - ? 
this.options.nfs.dirPermissionsGroup - : ""), - volume_path, - ]); - } - - let volume_context = { + const config_key = driver.getConfigKey(); + return { node_attach_driver: "nfs", - server: this.options.nfs.shareHost, + server: this.options[config_key].shareHost, share: driver.getShareVolumePath(name), }; - - volume_context["provisioner_driver"] = driver.options.driver; - if (driver.options.instance_id) { - volume_context["provisioner_driver_instance_id"] = - driver.options.instance_id; - } - - const res = { - volume: { - volume_id: name, - //capacity_bytes: capacity_bytes, // kubernetes currently pukes if capacity is returned as 0 - capacity_bytes: 0, - content_source: volume_content_source, - volume_context, - }, - }; - - return res; } - /** - * Delete a volume - * - * Deleting a volume consists of the following steps: - * 1. delete directory - * - * @param {*} call - */ - async DeleteVolume(call) { - const driver = this; - - let name = call.request.volume_id; - - if (!name) { - throw new GrpcError( - grpc.status.INVALID_ARGUMENT, - `volume_id is required` - ); - } - - const volume_path = driver.getControllerVolumePath(name); - await driver.deleteDir(volume_path); - - return {}; - } - - /** - * - * @param {*} call - */ - async ControllerExpandVolume(call) { - throw new GrpcError( - grpc.status.UNIMPLEMENTED, - `operation not supported by driver` - ); - } - - /** - * TODO: consider volume_capabilities? - * - * @param {*} call - */ - async GetCapacity(call) { - // really capacity is not used at all with nfs in this fashion, so no reason to enable - // here even though it is technically feasible. 
- throw new GrpcError( - grpc.status.UNIMPLEMENTED, - `operation not supported by driver` - ); - - const driver = this; - - if (call.request.volume_capabilities) { - const result = this.assertCapabilities(call.request.volume_capabilities); - - if (result.valid !== true) { - return { available_capacity: 0 }; - } - } - - const available_capacity = await driver.getAvailableSpaceAtPath( - driver.getControllerBasePath() - ); - return { available_capacity }; - } - - /** - * - * TODO: check capability to ensure not asking about block volumes - * - * @param {*} call - */ - async ListVolumes(call) { - throw new GrpcError( - grpc.status.UNIMPLEMENTED, - `operation not supported by driver` - ); - } - - /** - * - * @param {*} call - */ - async ListSnapshots(call) { - throw new GrpcError( - grpc.status.UNIMPLEMENTED, - `operation not supported by driver` - ); - } - - /** - * - * @param {*} call - */ - async CreateSnapshot(call) { - const driver = this; - - // both these are required - let source_volume_id = call.request.source_volume_id; - let name = call.request.name; - - if (!source_volume_id) { - throw new GrpcError( - grpc.status.INVALID_ARGUMENT, - `snapshot source_volume_id is required` - ); - } - - if (!name) { - throw new GrpcError( - grpc.status.INVALID_ARGUMENT, - `snapshot name is required` - ); - } - - driver.ctx.logger.verbose("requested snapshot name: %s", name); - - let invalid_chars; - invalid_chars = name.match(/[^a-z0-9_\-:.+]+/gi); - if (invalid_chars) { - invalid_chars = String.prototype.concat( - ...new Set(invalid_chars.join("")) - ); - throw new GrpcError( - grpc.status.INVALID_ARGUMENT, - `snapshot name contains invalid characters: ${invalid_chars}` - ); - } - - // https://stackoverflow.com/questions/32106243/regex-to-remove-all-non-alpha-numeric-and-replace-spaces-with/32106277 - name = name.replace(/[^a-z0-9_\-:.+]+/gi, ""); - - driver.ctx.logger.verbose("cleansed snapshot name: %s", name); - - const snapshot_id = `${source_volume_id}-${name}`; - const 
volume_path = driver.getControllerVolumePath(source_volume_id); - const snapshot_path = driver.getControllerSnapshotPath(snapshot_id); - - await driver.cloneDir(volume_path, snapshot_path); - - return { - snapshot: { - /** - * The purpose of this field is to give CO guidance on how much space - * is needed to create a volume from this snapshot. - */ - size_bytes: 0, - snapshot_id, - source_volume_id: source_volume_id, - //https://github.com/protocolbuffers/protobuf/blob/master/src/google/protobuf/timestamp.proto - creation_time: { - seconds: Math.round(new Date().getTime() / 1000), - nanos: 0, - }, - ready_to_use: true, - }, - }; - } - - /** - * In addition, if clones have been created from a snapshot, then they must - * be destroyed before the snapshot can be destroyed. - * - * @param {*} call - */ - async DeleteSnapshot(call) { - const driver = this; - - const snapshot_id = call.request.snapshot_id; - - if (!snapshot_id) { - throw new GrpcError( - grpc.status.INVALID_ARGUMENT, - `snapshot_id is required` - ); - } - - const snapshot_path = driver.getControllerSnapshotPath(snapshot_id); - await driver.deleteDir(snapshot_path); - - return {}; - } - - /** - * - * @param {*} call - */ - async ValidateVolumeCapabilities(call) { - const driver = this; - const result = this.assertCapabilities(call.request.volume_capabilities); - - if (result.valid !== true) { - return { message: result.message }; - } - - return { - confirmed: { - volume_context: call.request.volume_context, - volume_capabilities: call.request.volume_capabilities, // TODO: this is a bit crude, should return *ALL* capabilities, not just what was requested - parameters: call.request.parameters, - }, - }; + getFsTypes() { + return ["nfs"]; } } diff --git a/src/driver/controller-smb-client/index.js b/src/driver/controller-smb-client/index.js new file mode 100644 index 0000000..e521c31 --- /dev/null +++ b/src/driver/controller-smb-client/index.js @@ -0,0 +1,31 @@ +const { ControllerClientCommonDriver } = 
require("../controller-client-common"); + +/** + * Crude smb-client driver which simply creates directories to be mounted + * and uses rsync for cloning/snapshots + */ +class ControllerSmbClientDriver extends ControllerClientCommonDriver { + constructor(ctx, options) { + super(...arguments); + } + + getConfigKey() { + return "smb"; + } + + getVolumeContext(name) { + const driver = this; + const config_key = driver.getConfigKey(); + return { + node_attach_driver: "smb", + server: this.options[config_key].shareHost, + share: driver.stripLeadingSlash(driver.getShareVolumePath(name)), + }; + } + + getFsTypes() { + return ["cifs"]; + } +} + +module.exports.ControllerSmbClientDriver = ControllerSmbClientDriver; diff --git a/src/driver/factory.js b/src/driver/factory.js index e1078f0..917a1bc 100644 --- a/src/driver/factory.js +++ b/src/driver/factory.js @@ -5,6 +5,7 @@ const { } = require("./zfs-local-ephemeral-inline"); const { ControllerNfsClientDriver } = require("./controller-nfs-client"); +const { ControllerSmbClientDriver } = require("./controller-smb-client"); const { NodeManualDriver } = require("./node-manual"); function factory(ctx, options) { @@ -21,6 +22,8 @@ function factory(ctx, options) { return new ControllerZfsGenericDriver(ctx, options); case "zfs-local-ephemeral-inline": return new ZfsLocalEphemeralInlineDriver(ctx, options); + case "smb-client": + return new ControllerSmbClientDriver(ctx, options); case "nfs-client": return new ControllerNfsClientDriver(ctx, options); case "node-manual": From 8765da65c45e37fff54fb259d0f4b28cb7f6ccb0 Mon Sep 17 00:00:00 2001 From: Travis Glenn Hansen Date: Sat, 15 May 2021 08:39:19 -0600 Subject: [PATCH 02/44] better doc and handling of portals Signed-off-by: Travis Glenn Hansen --- README.md | 4 +++- examples/freenas-iscsi.yaml | 5 +++-- examples/zfs-generic-iscsi.yaml | 6 ++++-- src/driver/controller-zfs-generic/index.js | 4 +++- src/driver/freenas/index.js | 20 +++----------------- 5 files changed, 16 insertions(+), 
23 deletions(-) diff --git a/README.md b/README.md index 47d13fd..c64c038 100644 --- a/README.md +++ b/README.md @@ -42,11 +42,13 @@ Predominantly 3 things are needed: - deploy the driver into the cluster (`helm` chart provided with sample `values.yaml`) -## Guides +## Community Guides - https://jonathangazeley.com/2021/01/05/using-truenas-to-provide-persistent-storage-for-kubernetes/ - https://gist.github.com/admun/4372899f20421a947b7544e5fc9f9117 (migrating from `nfs-client-provisioner` to `democratic-csi`) +- https://gist.github.com/deefdragon/d58a4210622ff64088bd62a5d8a4e8cc + (migrating between storage classes using `velero`) ## Node Prep diff --git a/examples/freenas-iscsi.yaml b/examples/freenas-iscsi.yaml index 15413b5..1e67356 100644 --- a/examples/freenas-iscsi.yaml +++ b/examples/freenas-iscsi.yaml @@ -60,8 +60,9 @@ zfs: # 512, 1K, 2K, 4K, 8K, 16K, 64K, 128K default is 16K zvolBlocksize: iscsi: - targetPortal: "server:3261" - targetPortals: [] + targetPortal: "server[:port]" + # for multipath + targetPortals: [] # [ "server[:port]", "server[:port]", ... ] # leave empty to omit usage of -I with iscsiadm interface: diff --git a/examples/zfs-generic-iscsi.yaml b/examples/zfs-generic-iscsi.yaml index e529ff2..7e0d13f 100644 --- a/examples/zfs-generic-iscsi.yaml +++ b/examples/zfs-generic-iscsi.yaml @@ -73,8 +73,10 @@ iscsi: # mutual CHAP #mutual_userid: "baz" #mutual_password: "bar" - targetPortal: "server address" - targetPortals: [] + targetPortal: "server[:port]" + # for multipath + targetPortals: [] # [ "server[:port]", "server[:port]", ... 
] + # leave empty to omit usage of -I with iscsiadm interface: "" # MUST ensure uniqueness diff --git a/src/driver/controller-zfs-generic/index.js b/src/driver/controller-zfs-generic/index.js index 71173cf..09d0c7b 100644 --- a/src/driver/controller-zfs-generic/index.js +++ b/src/driver/controller-zfs-generic/index.js @@ -169,7 +169,9 @@ create /backstores/block/${iscsiName} volume_context = { node_attach_driver: "iscsi", portal: this.options.iscsi.targetPortal, - portals: this.options.iscsi.targetPortals.join(","), + portals: this.options.iscsi.targetPortals + ? this.options.iscsi.targetPortals.join(",") + : "", interface: this.options.iscsi.interface, iqn: iqn, lun: 0, diff --git a/src/driver/freenas/index.js b/src/driver/freenas/index.js index e53721d..e9987bc 100644 --- a/src/driver/freenas/index.js +++ b/src/driver/freenas/index.js @@ -1232,27 +1232,13 @@ class FreeNASDriver extends ControllerZfsSshBaseDriver { [FREENAS_ISCSI_ASSETS_NAME_PROPERTY_NAME]: iscsiName, }); - // iscsiadm -m discovery -t st -p 172.21.26.81 - // iscsiadm -m node -T iqn.2011-03.lan.bitness.istgt:test -p bitness.lan -l - - // FROM driver config? no, node attachment should have everything required to remain independent - // portal - // portals - // interface - // chap discovery - // chap session - - // FROM context - // iqn - // lun - volume_context = { node_attach_driver: "iscsi", portal: this.options.iscsi.targetPortal, - portals: this.options.iscsi.targetPortals.join(","), + portals: this.options.iscsi.targetPortals + ? 
this.options.iscsi.targetPortals.join(",") + : "", interface: this.options.iscsi.interface || "", - //chapDiscoveryEnabled: this.options.iscsi.chapDiscoveryEnabled, - //chapSessionEnabled: this.options.iscsi.chapSessionEnabled, iqn: iqn, lun: 0, }; From 8b6e12dd77a3a8eae0bb2042cecd4111281d3a7f Mon Sep 17 00:00:00 2001 From: Travis Glenn Hansen Date: Thu, 3 Jun 2021 17:20:01 -0600 Subject: [PATCH 03/44] freenas overhaul, synology shell Signed-off-by: Travis Glenn Hansen --- src/driver/controller-client-common/index.js | 1 - src/driver/controller-synology/index.js | 465 +++ src/driver/factory.js | 14 +- src/driver/freenas/api.js | 2798 ++++++++++++++++++ src/driver/freenas/http/api.js | 705 +++++ src/driver/freenas/{index.js => ssh.js} | 0 6 files changed, 3980 insertions(+), 3 deletions(-) create mode 100644 src/driver/controller-synology/index.js create mode 100644 src/driver/freenas/api.js create mode 100644 src/driver/freenas/http/api.js rename src/driver/freenas/{index.js => ssh.js} (100%) diff --git a/src/driver/controller-client-common/index.js b/src/driver/controller-client-common/index.js index 64e0596..9fe616b 100644 --- a/src/driver/controller-client-common/index.js +++ b/src/driver/controller-client-common/index.js @@ -1,7 +1,6 @@ const { CsiBaseDriver } = require("../index"); const { GrpcError, grpc } = require("../../utils/grpc"); const cp = require("child_process"); -const { Mount } = require("../../utils/mount"); /** * Crude nfs-client driver which simply creates directories to be mounted diff --git a/src/driver/controller-synology/index.js b/src/driver/controller-synology/index.js new file mode 100644 index 0000000..143dd4e --- /dev/null +++ b/src/driver/controller-synology/index.js @@ -0,0 +1,465 @@ +const { CsiBaseDriver } = require("../index"); +const { GrpcError, grpc } = require("../../utils/grpc"); + +/** + * + * Driver to provision storage on a synology device + * + */ +class ControllerSynologyDriver extends CsiBaseDriver { + constructor(ctx, 
options) { + super(...arguments); + + options = options || {}; + options.service = options.service || {}; + options.service.identity = options.service.identity || {}; + options.service.controller = options.service.controller || {}; + options.service.node = options.service.node || {}; + + options.service.identity.capabilities = + options.service.identity.capabilities || {}; + + options.service.controller.capabilities = + options.service.controller.capabilities || {}; + + options.service.node.capabilities = options.service.node.capabilities || {}; + + if (!("service" in options.service.identity.capabilities)) { + this.ctx.logger.debug("setting default identity service caps"); + + options.service.identity.capabilities.service = [ + //"UNKNOWN", + "CONTROLLER_SERVICE", + //"VOLUME_ACCESSIBILITY_CONSTRAINTS" + ]; + } + + if (!("volume_expansion" in options.service.identity.capabilities)) { + this.ctx.logger.debug("setting default identity volume_expansion caps"); + + options.service.identity.capabilities.volume_expansion = [ + //"UNKNOWN", + "ONLINE", + //"OFFLINE" + ]; + } + + if (!("rpc" in options.service.controller.capabilities)) { + this.ctx.logger.debug("setting default controller caps"); + + options.service.controller.capabilities.rpc = [ + //"UNKNOWN", + "CREATE_DELETE_VOLUME", + //"PUBLISH_UNPUBLISH_VOLUME", + //"LIST_VOLUMES", + //"GET_CAPACITY", + //"CREATE_DELETE_SNAPSHOT", + //"LIST_SNAPSHOTS", + //"CLONE_VOLUME", + //"PUBLISH_READONLY", + //"EXPAND_VOLUME", + ]; + } + + if (!("rpc" in options.service.node.capabilities)) { + this.ctx.logger.debug("setting default node caps"); + + options.service.node.capabilities.rpc = [ + //"UNKNOWN", + "STAGE_UNSTAGE_VOLUME", + "GET_VOLUME_STATS", + //"EXPAND_VOLUME" + ]; + } + } + + getDriverResourceType() { + switch (this.options.driver) { + case "synology-nfs": + case "synology-smb": + return "filesystem"; + case "synology-iscsi": + return "volume"; + default: + throw new Error("unknown driver: " + 
this.ctx.args.driver); + } + } + + getDriverShareType() { + switch (this.options.driver) { + case "synology-nfs": + return "nfs"; + case "synology-smb": + return "smb"; + case "synology-iscsi": + return "iscsi"; + default: + throw new Error("unknown driver: " + this.ctx.args.driver); + } + } + + assertCapabilities(capabilities) { + const driverResourceType = this.getDriverResourceType(); + this.ctx.logger.verbose("validating capabilities: %j", capabilities); + + let message = null; + //[{"access_mode":{"mode":"SINGLE_NODE_WRITER"},"mount":{"mount_flags":["noatime","_netdev"],"fs_type":"nfs"},"access_type":"mount"}] + const valid = capabilities.every((capability) => { + switch (driverResourceType) { + case "filesystem": + if (capability.access_type != "mount") { + message = `invalid access_type ${capability.access_type}`; + return false; + } + + if ( + capability.mount.fs_type && + !["nfs", "cifs"].includes(capability.mount.fs_type) + ) { + message = `invalid fs_type ${capability.mount.fs_type}`; + return false; + } + + if ( + ![ + "UNKNOWN", + "SINGLE_NODE_WRITER", + "SINGLE_NODE_READER_ONLY", + "MULTI_NODE_READER_ONLY", + "MULTI_NODE_SINGLE_WRITER", + "MULTI_NODE_MULTI_WRITER", + ].includes(capability.access_mode.mode) + ) { + message = `invalid access_mode, ${capability.access_mode.mode}`; + return false; + } + + return true; + case "volume": + if (capability.access_type == "mount") { + if ( + capability.mount.fs_type && + !["ext3", "ext4", "ext4dev", "xfs"].includes( + capability.mount.fs_type + ) + ) { + message = `invalid fs_type ${capability.mount.fs_type}`; + return false; + } + } + + if ( + ![ + "UNKNOWN", + "SINGLE_NODE_WRITER", + "SINGLE_NODE_READER_ONLY", + "MULTI_NODE_READER_ONLY", + "MULTI_NODE_SINGLE_WRITER", + ].includes(capability.access_mode.mode) + ) { + message = `invalid access_mode, ${capability.access_mode.mode}`; + return false; + } + + return true; + } + }); + + return { valid, message }; + } + + /** + * + * CreateVolume + * + * @param {*} 
call + */ + async CreateVolume(call) { + const driver = this; + + let name = call.request.name; + let volume_content_source = call.request.volume_content_source; + + if (!name) { + throw new GrpcError( + grpc.status.INVALID_ARGUMENT, + `volume name is required` + ); + } + + if (call.request.volume_capabilities) { + const result = this.assertCapabilities(call.request.volume_capabilities); + if (result.valid !== true) { + throw new GrpcError(grpc.status.INVALID_ARGUMENT, result.message); + } + } + + if ( + call.request.capacity_range.required_bytes > 0 && + call.request.capacity_range.limit_bytes > 0 && + call.request.capacity_range.required_bytes > + call.request.capacity_range.limit_bytes + ) { + throw new GrpcError( + grpc.status.OUT_OF_RANGE, + `required_bytes is greather than limit_bytes` + ); + } + + let capacity_bytes = + call.request.capacity_range.required_bytes || + call.request.capacity_range.limit_bytes; + + if (!capacity_bytes) { + //should never happen, value must be set + throw new GrpcError( + grpc.status.INVALID_ARGUMENT, + `volume capacity is required (either required_bytes or limit_bytes)` + ); + } + + // ensure *actual* capacity is not greater than limit + if ( + call.request.capacity_range.limit_bytes && + call.request.capacity_range.limit_bytes > 0 && + capacity_bytes > call.request.capacity_range.limit_bytes + ) { + throw new GrpcError( + grpc.status.OUT_OF_RANGE, + `required volume capacity is greater than limit` + ); + } + + switch (driver.getDriverShareType()) { + case "nfs": + // TODO: create volume here + break; + case "smb": + // TODO: create volume here + break; + case "iscsi": + // TODO: create volume here + break; + default: + // throw an error + break; + } + + let volume_context = driver.getVolumeContext(name); + + volume_context["provisioner_driver"] = driver.options.driver; + if (driver.options.instance_id) { + volume_context["provisioner_driver_instance_id"] = + driver.options.instance_id; + } + + const res = { + volume: { + 
volume_id: name, + //capacity_bytes: capacity_bytes, // kubernetes currently pukes if capacity is returned as 0 + capacity_bytes: 0, + content_source: volume_content_source, + volume_context, + }, + }; + + return res; + } + + /** + * DeleteVolume + * + * @param {*} call + */ + async DeleteVolume(call) { + const driver = this; + + let name = call.request.volume_id; + + if (!name) { + throw new GrpcError( + grpc.status.INVALID_ARGUMENT, + `volume_id is required` + ); + } + + switch (driver.getDriverShareType()) { + case "nfs": + // TODO: delete volume here + break; + case "smb": + // TODO: delete volume here + break; + case "iscsi": + // TODO: delete volume here + break; + default: + // throw an error + break; + } + + return {}; + } + + /** + * + * @param {*} call + */ + async ControllerExpandVolume(call) { + throw new GrpcError( + grpc.status.UNIMPLEMENTED, + `operation not supported by driver` + ); + } + + /** + * TODO: consider volume_capabilities? + * + * @param {*} call + */ + async GetCapacity(call) { + throw new GrpcError( + grpc.status.UNIMPLEMENTED, + `operation not supported by driver` + ); + } + + /** + * + * TODO: check capability to ensure not asking about block volumes + * + * @param {*} call + */ + async ListVolumes(call) { + throw new GrpcError( + grpc.status.UNIMPLEMENTED, + `operation not supported by driver` + ); + } + + /** + * + * @param {*} call + */ + async ListSnapshots(call) { + throw new GrpcError( + grpc.status.UNIMPLEMENTED, + `operation not supported by driver` + ); + } + + /** + * + * @param {*} call + */ + async CreateSnapshot(call) { + throw new GrpcError( + grpc.status.UNIMPLEMENTED, + `operation not supported by driver` + ); + const driver = this; + + // both these are required + let source_volume_id = call.request.source_volume_id; + let name = call.request.name; + + if (!source_volume_id) { + throw new GrpcError( + grpc.status.INVALID_ARGUMENT, + `snapshot source_volume_id is required` + ); + } + + if (!name) { + throw new 
GrpcError( + grpc.status.INVALID_ARGUMENT, + `snapshot name is required` + ); + } + + driver.ctx.logger.verbose("requested snapshot name: %s", name); + + let invalid_chars; + invalid_chars = name.match(/[^a-z0-9_\-:.+]+/gi); + if (invalid_chars) { + invalid_chars = String.prototype.concat( + ...new Set(invalid_chars.join("")) + ); + throw new GrpcError( + grpc.status.INVALID_ARGUMENT, + `snapshot name contains invalid characters: ${invalid_chars}` + ); + } + + // TODO: create snapshot here + + return { + snapshot: { + /** + * The purpose of this field is to give CO guidance on how much space + * is needed to create a volume from this snapshot. + */ + size_bytes: 0, + snapshot_id, + source_volume_id: source_volume_id, + //https://github.com/protocolbuffers/protobuf/blob/master/src/google/protobuf/timestamp.proto + creation_time: { + seconds: Math.round(new Date().getTime() / 1000), + nanos: 0, + }, + ready_to_use: true, + }, + }; + } + + /** + * In addition, if clones have been created from a snapshot, then they must + * be destroyed before the snapshot can be destroyed. 
+ * + * @param {*} call + */ + async DeleteSnapshot(call) { + throw new GrpcError( + grpc.status.UNIMPLEMENTED, + `operation not supported by driver` + ); + + const driver = this; + + const snapshot_id = call.request.snapshot_id; + + if (!snapshot_id) { + throw new GrpcError( + grpc.status.INVALID_ARGUMENT, + `snapshot_id is required` + ); + } + + // TODO: delete snapshot here + + return {}; + } + + /** + * + * @param {*} call + */ + async ValidateVolumeCapabilities(call) { + const driver = this; + const result = this.assertCapabilities(call.request.volume_capabilities); + + if (result.valid !== true) { + return { message: result.message }; + } + + return { + confirmed: { + volume_context: call.request.volume_context, + volume_capabilities: call.request.volume_capabilities, // TODO: this is a bit crude, should return *ALL* capabilities, not just what was requested + parameters: call.request.parameters, + }, + }; + } +} + +module.exports.ControllerSynologyDriver = ControllerSynologyDriver; diff --git a/src/driver/factory.js b/src/driver/factory.js index 917a1bc..f10f9b5 100644 --- a/src/driver/factory.js +++ b/src/driver/factory.js @@ -1,4 +1,5 @@ -const { FreeNASDriver } = require("./freenas"); +const { FreeNASSshDriver } = require("./freenas/ssh"); +const { FreeNASApiDriver } = require("./freenas/api"); const { ControllerZfsGenericDriver } = require("./controller-zfs-generic"); const { ZfsLocalEphemeralInlineDriver, @@ -6,6 +7,7 @@ const { const { ControllerNfsClientDriver } = require("./controller-nfs-client"); const { ControllerSmbClientDriver } = require("./controller-smb-client"); +const { ControllerSynologyDriver } = require("./controller-synology"); const { NodeManualDriver } = require("./node-manual"); function factory(ctx, options) { @@ -16,7 +18,15 @@ function factory(ctx, options) { case "truenas-nfs": case "truenas-smb": case "truenas-iscsi": - return new FreeNASDriver(ctx, options); + return new FreeNASSshDriver(ctx, options); + case 
"freenas-api-iscsi": + case "freenas-api-nfs": + case "freenas-api-smb": + return new FreeNASApiDriver(ctx, options); + case "synology-nfs": + case "synology-smb": + case "synology-iscsi": + return new ControllerSynologyDriver(ctx, options); case "zfs-generic-nfs": case "zfs-generic-iscsi": return new ControllerZfsGenericDriver(ctx, options); diff --git a/src/driver/freenas/api.js b/src/driver/freenas/api.js new file mode 100644 index 0000000..637b1d2 --- /dev/null +++ b/src/driver/freenas/api.js @@ -0,0 +1,2798 @@ +const { GrpcError, grpc } = require("../../utils/grpc"); +const { CsiBaseDriver } = require("../index"); +const HttpClient = require("./http").Client; +const TrueNASApiClient = require("./http/api").Api; +const { Zetabyte } = require("../../utils/zfs"); + +const Handlebars = require("handlebars"); +const uuidv4 = require("uuid").v4; +const semver = require("semver"); + +// freenas properties +const FREENAS_NFS_SHARE_PROPERTY_NAME = "democratic-csi:freenas_nfs_share_id"; +const FREENAS_SMB_SHARE_PROPERTY_NAME = "democratic-csi:freenas_smb_share_id"; +const FREENAS_ISCSI_TARGET_ID_PROPERTY_NAME = + "democratic-csi:freenas_iscsi_target_id"; +const FREENAS_ISCSI_EXTENT_ID_PROPERTY_NAME = + "democratic-csi:freenas_iscsi_extent_id"; +const FREENAS_ISCSI_TARGETTOEXTENT_ID_PROPERTY_NAME = + "democratic-csi:freenas_iscsi_targettoextent_id"; +const FREENAS_ISCSI_ASSETS_NAME_PROPERTY_NAME = + "democratic-csi:freenas_iscsi_assets_name"; + +// zfs common properties +const MANAGED_PROPERTY_NAME = "democratic-csi:managed_resource"; +const SUCCESS_PROPERTY_NAME = "democratic-csi:provision_success"; +const VOLUME_SOURCE_CLONE_SNAPSHOT_PREFIX = "volume-source-for-volume-"; +const VOLUME_SOURCE_DETACHED_SNAPSHOT_PREFIX = "volume-source-for-snapshot-"; +const VOLUME_CSI_NAME_PROPERTY_NAME = "democratic-csi:csi_volume_name"; +const SHARE_VOLUME_CONTEXT_PROPERTY_NAME = + "democratic-csi:csi_share_volume_context"; +const VOLUME_CONTENT_SOURCE_TYPE_PROPERTY_NAME = + 
"democratic-csi:csi_volume_content_source_type"; +const VOLUME_CONTENT_SOURCE_ID_PROPERTY_NAME = + "democratic-csi:csi_volume_content_source_id"; +const SNAPSHOT_CSI_NAME_PROPERTY_NAME = "democratic-csi:csi_snapshot_name"; +const SNAPSHOT_CSI_SOURCE_VOLUME_ID_PROPERTY_NAME = + "democratic-csi:csi_snapshot_source_volume_id"; + +const VOLUME_CONTEXT_PROVISIONER_DRIVER_PROPERTY_NAME = + "democratic-csi:volume_context_provisioner_driver"; +const VOLUME_CONTEXT_PROVISIONER_INSTANCE_ID_PROPERTY_NAME = + "democratic-csi:volume_context_provisioner_instance_id"; + +function isPropertyValueSet(value) { + if (value === undefined || value === null || value == "" || value == "-") { + return false; + } + + return true; +} + +class FreeNASApiDriver extends CsiBaseDriver { + constructor(ctx, options) { + super(...arguments); + + options = options || {}; + options.service = options.service || {}; + options.service.identity = options.service.identity || {}; + options.service.controller = options.service.controller || {}; + options.service.node = options.service.node || {}; + + options.service.identity.capabilities = + options.service.identity.capabilities || {}; + + options.service.controller.capabilities = + options.service.controller.capabilities || {}; + + options.service.node.capabilities = options.service.node.capabilities || {}; + + if (!("service" in options.service.identity.capabilities)) { + this.ctx.logger.debug("setting default identity service caps"); + + options.service.identity.capabilities.service = [ + //"UNKNOWN", + "CONTROLLER_SERVICE", + //"VOLUME_ACCESSIBILITY_CONSTRAINTS" + ]; + } + + if (!("volume_expansion" in options.service.identity.capabilities)) { + this.ctx.logger.debug("setting default identity volume_expansion caps"); + + options.service.identity.capabilities.volume_expansion = [ + //"UNKNOWN", + "ONLINE", + //"OFFLINE" + ]; + } + + if (!("rpc" in options.service.controller.capabilities)) { + this.ctx.logger.debug("setting default controller caps"); + + 
options.service.controller.capabilities.rpc = [ + //"UNKNOWN", + "CREATE_DELETE_VOLUME", + //"PUBLISH_UNPUBLISH_VOLUME", + //"LIST_VOLUMES_PUBLISHED_NODES", + "LIST_VOLUMES", + "GET_CAPACITY", + "CREATE_DELETE_SNAPSHOT", + "LIST_SNAPSHOTS", + "CLONE_VOLUME", + //"PUBLISH_READONLY", + "EXPAND_VOLUME", + //"VOLUME_CONDITION", // added in v1.3.0 + //"GET_VOLUME", // added in v1.3.0 + ]; + } + + if (!("rpc" in options.service.node.capabilities)) { + this.ctx.logger.debug("setting default node caps"); + + switch (this.getDriverZfsResourceType()) { + case "filesystem": + options.service.node.capabilities.rpc = [ + //"UNKNOWN", + "STAGE_UNSTAGE_VOLUME", + "GET_VOLUME_STATS", + //"EXPAND_VOLUME", + //"VOLUME_CONDITION", + ]; + break; + case "volume": + options.service.node.capabilities.rpc = [ + //"UNKNOWN", + "STAGE_UNSTAGE_VOLUME", + "GET_VOLUME_STATS", + "EXPAND_VOLUME", + //"VOLUME_CONDITION", + ]; + break; + } + } + } + + /** + * only here for the helpers + * @returns + */ + async getZetabyte() { + return new Zetabyte({ + executor: { + spawn: function () { + throw new Error( + "cannot use the zb implementation to execute zfs commands, must use the http api" + ); + }, + }, + }); + } + + /** + * should create any necessary share resources + * should set the SHARE_VOLUME_CONTEXT_PROPERTY_NAME propery + * + * @param {*} datasetName + */ + async createShare(call, datasetName) { + const driverShareType = this.getDriverShareType(); + const httpClient = await this.getHttpClient(); + const httpApiClient = await this.getTrueNASHttpApiClient(); + const apiVersion = httpClient.getApiVersion(); + const zb = await this.getZetabyte(); + + let volume_context; + let properties; + let endpoint; + let response; + let share = {}; + + switch (driverShareType) { + case "nfs": + properties = await httpApiClient.DatasetGet(datasetName, [ + "mountpoint", + FREENAS_NFS_SHARE_PROPERTY_NAME, + ]); + this.ctx.logger.debug("zfs props data: %j", properties); + + // create nfs share + if ( + 
!zb.helpers.isPropertyValueSet( + properties[FREENAS_NFS_SHARE_PROPERTY_NAME].value + ) + ) { + switch (apiVersion) { + case 1: + case 2: + switch (apiVersion) { + case 1: + share = { + nfs_paths: [properties.mountpoint.value], + nfs_comment: `democratic-csi (${this.ctx.args.csiName}): ${datasetName}`, + nfs_network: + this.options.nfs.shareAllowedNetworks.join(","), + nfs_hosts: this.options.nfs.shareAllowedHosts.join(","), + nfs_alldirs: this.options.nfs.shareAlldirs, + nfs_ro: false, + nfs_quiet: false, + nfs_maproot_user: this.options.nfs.shareMaprootUser, + nfs_maproot_group: this.options.nfs.shareMaprootGroup, + nfs_mapall_user: this.options.nfs.shareMapallUser, + nfs_mapall_group: this.options.nfs.shareMapallGroup, + nfs_security: [], + }; + break; + case 2: + share = { + paths: [properties.mountpoint.value], + comment: `democratic-csi (${this.ctx.args.csiName}): ${datasetName}`, + networks: this.options.nfs.shareAllowedNetworks, + hosts: this.options.nfs.shareAllowedHosts, + alldirs: this.options.nfs.shareAlldirs, + ro: false, + quiet: false, + maproot_user: this.options.nfs.shareMaprootUser, + maproot_group: this.options.nfs.shareMaprootGroup, + mapall_user: this.options.nfs.shareMapallUser, + mapall_group: this.options.nfs.shareMapallGroup, + security: [], + }; + break; + } + + response = await httpClient.post("/sharing/nfs", share); + + /** + * v1 = 201 + * v2 = 200 + */ + if ([200, 201].includes(response.statusCode)) { + let sharePaths; + switch (apiVersion) { + case 1: + sharePaths = response.body.nfs_paths; + break; + case 2: + sharePaths = response.body.paths; + break; + } + + // FreeNAS responding with bad data + if (!sharePaths.includes(properties.mountpoint.value)) { + throw new GrpcError( + grpc.status.UNKNOWN, + `FreeNAS responded with incorrect share data: ${ + response.statusCode + } body: ${JSON.stringify(response.body)}` + ); + } + + //set zfs property + await httpApiClient.DatasetSet(datasetName, { + [FREENAS_NFS_SHARE_PROPERTY_NAME]: 
response.body.id, + }); + } else { + /** + * v1 = 409 + * v2 = 422 + */ + if ( + [409, 422].includes(response.statusCode) && + (JSON.stringify(response.body).includes( + "You can't share same filesystem with all hosts twice." + ) || + JSON.stringify(response.body).includes( + "Another NFS share already exports this dataset for some network" + )) + ) { + let lookupShare = + await httpApiClient.findResourceByProperties( + "/sharing/nfs", + (item) => { + if ( + (item.nfs_paths && + item.nfs_paths.includes( + properties.mountpoint.value + )) || + (item.paths && + item.paths.includes(properties.mountpoint.value)) + ) { + return true; + } + return false; + } + ); + + if (!lookupShare) { + throw new GrpcError( + grpc.status.UNKNOWN, + `FreeNAS failed to find matching share` + ); + } + + //set zfs property + await httpApiClient.DatasetSet(datasetName, { + [FREENAS_NFS_SHARE_PROPERTY_NAME]: lookupShare.id, + }); + } else { + throw new GrpcError( + grpc.status.UNKNOWN, + `received error creating nfs share - code: ${ + response.statusCode + } body: ${JSON.stringify(response.body)}` + ); + } + } + break; + default: + throw new GrpcError( + grpc.status.FAILED_PRECONDITION, + `invalid configuration: unknown apiVersion ${apiVersion}` + ); + } + } + + volume_context = { + node_attach_driver: "nfs", + server: this.options.nfs.shareHost, + share: properties.mountpoint.value, + }; + return volume_context; + + break; + /** + * TODO: smb need to be more defensive like iscsi and nfs + * ensuring the path is valid and the shareName + */ + case "smb": + properties = await httpApiClient.DatasetGet(datasetName, [ + "mountpoint", + FREENAS_SMB_SHARE_PROPERTY_NAME, + ]); + this.ctx.logger.debug("zfs props data: %j", properties); + + let smbName; + + if (this.options.smb.nameTemplate) { + smbName = Handlebars.compile(this.options.smb.nameTemplate)({ + name: call.request.name, + parameters: call.request.parameters, + }); + } else { + smbName = zb.helpers.extractLeafName(datasetName); + } + + if 
(this.options.smb.namePrefix) { + smbName = this.options.smb.namePrefix + smbName; + } + + if (this.options.smb.nameSuffix) { + smbName += this.options.smb.nameSuffix; + } + + smbName = smbName.toLowerCase(); + + this.ctx.logger.info( + "FreeNAS creating smb share with name: " + smbName + ); + + // create smb share + if ( + !zb.helpers.isPropertyValueSet( + properties[FREENAS_SMB_SHARE_PROPERTY_NAME].value + ) + ) { + /** + * The only required parameters are: + * - path + * - name + * + * Note that over time it appears the list of available parameters has increased + * so in an effort to best support old versions of FreeNAS we should check the + * presense of each parameter in the config and set the corresponding parameter in + * the API request *only* if present in the config. + */ + switch (apiVersion) { + case 1: + case 2: + share = { + name: smbName, + path: properties.mountpoint.value, + }; + + let propertyMapping = { + shareAuxiliaryConfigurationTemplate: "auxsmbconf", + shareHome: "home", + shareAllowedHosts: "hostsallow", + shareDeniedHosts: "hostsdeny", + shareDefaultPermissions: "default_permissions", + shareGuestOk: "guestok", + shareGuestOnly: "guestonly", + shareShowHiddenFiles: "showhiddenfiles", + shareRecycleBin: "recyclebin", + shareBrowsable: "browsable", + shareAccessBasedEnumeration: "abe", + shareTimeMachine: "timemachine", + shareStorageTask: "storage_task", + }; + + for (const key in propertyMapping) { + if (this.options.smb.hasOwnProperty(key)) { + let value; + switch (key) { + case "shareAuxiliaryConfigurationTemplate": + value = Handlebars.compile( + this.options.smb.shareAuxiliaryConfigurationTemplate + )({ + name: call.request.name, + parameters: call.request.parameters, + }); + break; + default: + value = this.options.smb[key]; + break; + } + share[propertyMapping[key]] = value; + } + } + + switch (apiVersion) { + case 1: + endpoint = "/sharing/cifs"; + + // rename keys with cifs_ prefix + for (const key in share) { + share["cifs_" + 
key] = share[key]; + delete share[key]; + } + + // convert to comma-separated list + if (share.cifs_hostsallow) { + share.cifs_hostsallow = share.cifs_hostsallow.join(","); + } + + // convert to comma-separated list + if (share.cifs_hostsdeny) { + share.cifs_hostsdeny = share.cifs_hostsdeny.join(","); + } + break; + case 2: + endpoint = "/sharing/smb"; + break; + } + + response = await httpClient.post(endpoint, share); + + /** + * v1 = 201 + * v2 = 200 + */ + if ([200, 201].includes(response.statusCode)) { + share = response.body; + let sharePath; + let shareName; + switch (apiVersion) { + case 1: + sharePath = response.body.cifs_path; + shareName = response.body.cifs_name; + break; + case 2: + sharePath = response.body.path; + shareName = response.body.name; + break; + } + + if (shareName != smbName) { + throw new GrpcError( + grpc.status.UNKNOWN, + `FreeNAS responded with incorrect share data: ${ + response.statusCode + } body: ${JSON.stringify(response.body)}` + ); + } + + if (sharePath != properties.mountpoint.value) { + throw new GrpcError( + grpc.status.UNKNOWN, + `FreeNAS responded with incorrect share data: ${ + response.statusCode + } body: ${JSON.stringify(response.body)}` + ); + } + + //set zfs property + await zb.zfs.set(datasetName, { + [FREENAS_SMB_SHARE_PROPERTY_NAME]: response.body.id, + }); + } else { + /** + * v1 = 409 + * v2 = 422 + */ + if ( + [409, 422].includes(response.statusCode) && + JSON.stringify(response.body).includes( + "A share with this name already exists." 
+ ) + ) { + let lookupShare = + await httpApiClient.findResourceByProperties( + endpoint, + (item) => { + if ( + (item.cifs_path && + item.cifs_path == properties.mountpoint.value && + item.cifs_name && + item.cifs_name == smbName) || + (item.path && + item.path == properties.mountpoint.value && + item.name && + item.name == smbName) + ) { + return true; + } + return false; + } + ); + + if (!lookupShare) { + throw new GrpcError( + grpc.status.UNKNOWN, + `FreeNAS failed to find matching share` + ); + } + + //set zfs property + await httpApiClient.DatasetSet(datasetName, { + [FREENAS_SMB_SHARE_PROPERTY_NAME]: lookupShare.id, + }); + } else { + throw new GrpcError( + grpc.status.UNKNOWN, + `received error creating smb share - code: ${ + response.statusCode + } body: ${JSON.stringify(response.body)}` + ); + } + } + break; + default: + throw new GrpcError( + grpc.status.FAILED_PRECONDITION, + `invalid configuration: unknown apiVersion ${apiVersion}` + ); + } + } + + volume_context = { + node_attach_driver: "smb", + server: this.options.smb.shareHost, + share: smbName, + }; + return volume_context; + + break; + case "iscsi": + properties = await httpApiClient.DatasetGet(datasetName, [ + FREENAS_ISCSI_TARGET_ID_PROPERTY_NAME, + FREENAS_ISCSI_EXTENT_ID_PROPERTY_NAME, + FREENAS_ISCSI_TARGETTOEXTENT_ID_PROPERTY_NAME, + ]); + this.ctx.logger.debug("zfs props data: %j", properties); + + let basename; + let iscsiName; + + if (this.options.iscsi.nameTemplate) { + iscsiName = Handlebars.compile(this.options.iscsi.nameTemplate)({ + name: call.request.name, + parameters: call.request.parameters, + }); + } else { + iscsiName = zb.helpers.extractLeafName(datasetName); + } + + if (this.options.iscsi.namePrefix) { + iscsiName = this.options.iscsi.namePrefix + iscsiName; + } + + if (this.options.iscsi.nameSuffix) { + iscsiName += this.options.iscsi.nameSuffix; + } + + // According to RFC3270, 'Each iSCSI node, whether an initiator or target, MUST have an iSCSI name. 
Initiators and targets MUST support the receipt of iSCSI names of up to the maximum length of 223 bytes.' + // https://kb.netapp.com/Advice_and_Troubleshooting/Miscellaneous/What_is_the_maximum_length_of_a_iSCSI_iqn_name + // https://tools.ietf.org/html/rfc3720 + iscsiName = iscsiName.toLowerCase(); + + let extentDiskName = "zvol/" + datasetName; + + /** + * limit is a FreeBSD limitation + * https://www.ixsystems.com/documentation/freenas/11.2-U5/storage.html#zfs-zvol-config-opts-tab + */ + if (extentDiskName.length > 63) { + throw new GrpcError( + grpc.status.FAILED_PRECONDITION, + `extent disk name cannot exceed 63 characters: ${extentDiskName}` + ); + } + + this.ctx.logger.info( + "FreeNAS creating iscsi assets with name: " + iscsiName + ); + + const extentInsecureTpc = this.options.iscsi.hasOwnProperty( + "extentInsecureTpc" + ) + ? this.options.iscsi.extentInsecureTpc + : true; + + const extentXenCompat = this.options.iscsi.hasOwnProperty( + "extentXenCompat" + ) + ? this.options.iscsi.extentXenCompat + : false; + + const extentBlocksize = this.options.iscsi.hasOwnProperty( + "extentBlocksize" + ) + ? this.options.iscsi.extentBlocksize + : 512; + + const extentDisablePhysicalBlocksize = + this.options.iscsi.hasOwnProperty("extentDisablePhysicalBlocksize") + ? this.options.iscsi.extentDisablePhysicalBlocksize + : true; + + const extentRpm = this.options.iscsi.hasOwnProperty("extentRpm") + ? this.options.iscsi.extentRpm + : "SSD"; + + let extentAvailThreshold = this.options.iscsi.hasOwnProperty( + "extentAvailThreshold" + ) + ? 
Number(this.options.iscsi.extentAvailThreshold) + : null; + + if (!(extentAvailThreshold > 0 && extentAvailThreshold <= 100)) { + extentAvailThreshold = null; + } + + switch (apiVersion) { + case 1: + response = await httpClient.get( + "/services/iscsi/globalconfiguration" + ); + if (response.statusCode != 200) { + throw new GrpcError( + grpc.status.UNKNOWN, + `error getting iscsi configuration - code: ${ + response.statusCode + } body: ${JSON.stringify(response.body)}` + ); + } + basename = response.body.iscsi_basename; + this.ctx.logger.verbose("FreeNAS ISCSI BASENAME: " + basename); + break; + case 2: + response = await httpClient.get("/iscsi/global"); + if (response.statusCode != 200) { + throw new GrpcError( + grpc.status.UNKNOWN, + `error getting iscsi configuration - code: ${ + response.statusCode + } body: ${JSON.stringify(response.body)}` + ); + } + basename = response.body.basename; + this.ctx.logger.verbose("FreeNAS ISCSI BASENAME: " + basename); + break; + default: + throw new GrpcError( + grpc.status.FAILED_PRECONDITION, + `invalid configuration: unknown apiVersion ${apiVersion}` + ); + } + + // if we got all the way to the TARGETTOEXTENT then we fully finished + // otherwise we must do all assets every time due to the interdependence of IDs etc + if ( + !zb.helpers.isPropertyValueSet( + properties[FREENAS_ISCSI_TARGETTOEXTENT_ID_PROPERTY_NAME].value + ) + ) { + switch (apiVersion) { + case 1: { + // create target + let target = { + iscsi_target_name: iscsiName, + iscsi_target_alias: "", // TODO: allow template for this + }; + + response = await httpClient.post( + "/services/iscsi/target", + target + ); + + // 409 if invalid + if (response.statusCode != 201) { + target = null; + if ( + response.statusCode == 409 && + JSON.stringify(response.body).includes( + "Target name already exists" + ) + ) { + target = await httpApiClient.findResourceByProperties( + "/services/iscsi/target", + { + iscsi_target_name: iscsiName, + } + ); + } else { + throw new 
GrpcError( + grpc.status.UNKNOWN, + `received error creating iscsi target - code: ${ + response.statusCode + } body: ${JSON.stringify(response.body)}` + ); + } + } else { + target = response.body; + } + + if (!target) { + throw new GrpcError( + grpc.status.UNKNOWN, + `unknown error creating iscsi target` + ); + } + + if (target.iscsi_target_name != iscsiName) { + throw new GrpcError( + grpc.status.UNKNOWN, + `mismatch name error creating iscsi target` + ); + } + + this.ctx.logger.verbose("FreeNAS ISCSI TARGET: %j", target); + + // set target.id on zvol + await zb.zfs.set(datasetName, { + [FREENAS_ISCSI_TARGET_ID_PROPERTY_NAME]: target.id, + }); + + // create targetgroup(s) + // targetgroups do have IDs + for (let targetGroupConfig of this.options.iscsi.targetGroups) { + let targetGroup = { + iscsi_target: target.id, + iscsi_target_authgroup: + targetGroupConfig.targetGroupAuthGroup, + iscsi_target_authtype: targetGroupConfig.targetGroupAuthType + ? targetGroupConfig.targetGroupAuthType + : "None", + iscsi_target_portalgroup: + targetGroupConfig.targetGroupPortalGroup, + iscsi_target_initiatorgroup: + targetGroupConfig.targetGroupInitiatorGroup, + iscsi_target_initialdigest: "Auto", + }; + response = await httpClient.post( + "/services/iscsi/targetgroup", + targetGroup + ); + + // 409 if invalid + if (response.statusCode != 201) { + targetGroup = null; + /** + * 404 gets returned with an unable to process response when the DB is corrupted (has invalid entries in essense) + * + * To resolve properly the DB should be cleaned up + * /usr/local/etc/rc.d/django stop + * /usr/local/etc/rc.d/nginx stop + * sqlite3 /data/freenas-v1.db + * + * // this deletes everything, probably not what you want + * // should have a better query to only find entries where associated assets no longer exist + * DELETE from services_iscsitargetgroups; + * + * /usr/local/etc/rc.d/django restart + * /usr/local/etc/rc.d/nginx restart + */ + if ( + response.statusCode == 404 || + 
(response.statusCode == 409 && + JSON.stringify(response.body).includes( + "cannot be duplicated on a target" + )) + ) { + targetGroup = await httpApiClient.findResourceByProperties( + "/services/iscsi/targetgroup", + { + iscsi_target: target.id, + iscsi_target_portalgroup: + targetGroupConfig.targetGroupPortalGroup, + iscsi_target_initiatorgroup: + targetGroupConfig.targetGroupInitiatorGroup, + } + ); + } else { + throw new GrpcError( + grpc.status.UNKNOWN, + `received error creating iscsi targetgroup - code: ${ + response.statusCode + } body: ${JSON.stringify(response.body)}` + ); + } + } else { + targetGroup = response.body; + } + + if (!targetGroup) { + throw new GrpcError( + grpc.status.UNKNOWN, + `unknown error creating iscsi targetgroup` + ); + } + + this.ctx.logger.verbose( + "FreeNAS ISCSI TARGET_GROUP: %j", + targetGroup + ); + } + + let extent = { + iscsi_target_extent_comment: "", // TODO: allow template for this value + iscsi_target_extent_type: "Disk", // Disk/File, after save Disk becomes "ZVOL" + iscsi_target_extent_name: iscsiName, + iscsi_target_extent_insecure_tpc: extentInsecureTpc, + //iscsi_target_extent_naa: "0x3822690834aae6c5", + iscsi_target_extent_disk: extentDiskName, + iscsi_target_extent_xen: extentXenCompat, + iscsi_target_extent_avail_threshold: extentAvailThreshold, + iscsi_target_extent_blocksize: Number(extentBlocksize), + iscsi_target_extent_pblocksize: extentDisablePhysicalBlocksize, + iscsi_target_extent_rpm: isNaN(Number(extentRpm)) + ? 
"SSD" + : Number(extentRpm), + iscsi_target_extent_ro: false, + }; + response = await httpClient.post( + "/services/iscsi/extent", + extent + ); + + // 409 if invalid + if (response.statusCode != 201) { + extent = null; + if ( + response.statusCode == 409 && + JSON.stringify(response.body).includes( + "Extent name must be unique" + ) + ) { + extent = await httpApiClient.findResourceByProperties( + "/services/iscsi/extent", + { iscsi_target_extent_name: iscsiName } + ); + } else { + throw new GrpcError( + grpc.status.UNKNOWN, + `received error creating iscsi extent - code: ${ + response.statusCode + } body: ${JSON.stringify(response.body)}` + ); + } + } else { + extent = response.body; + } + + if (!extent) { + throw new GrpcError( + grpc.status.UNKNOWN, + `unknown error creating iscsi extent` + ); + } + + if (extent.iscsi_target_extent_name != iscsiName) { + throw new GrpcError( + grpc.status.UNKNOWN, + `mismatch name error creating iscsi extent` + ); + } + + this.ctx.logger.verbose("FreeNAS ISCSI EXTENT: %j", extent); + + await httpApiClient.DatasetSet(datasetName, { + [FREENAS_ISCSI_EXTENT_ID_PROPERTY_NAME]: extent.id, + }); + + // create targettoextent + let targetToExtent = { + iscsi_target: target.id, + iscsi_extent: extent.id, + iscsi_lunid: 0, + }; + response = await httpClient.post( + "/services/iscsi/targettoextent", + targetToExtent + ); + + // 409 if invalid + if (response.statusCode != 201) { + targetToExtent = null; + + // LUN ID is already being used for this target. + // Extent is already in this target. + if ( + response.statusCode == 409 && + (JSON.stringify(response.body).includes( + "Extent is already in this target." + ) || + JSON.stringify(response.body).includes( + "LUN ID is already being used for this target." 
+ )) + ) { + targetToExtent = await httpApiClient.findResourceByProperties( + "/services/iscsi/targettoextent", + { + iscsi_target: target.id, + iscsi_extent: extent.id, + iscsi_lunid: 0, + } + ); + } else { + throw new GrpcError( + grpc.status.UNKNOWN, + `received error creating iscsi targettoextent - code: ${ + response.statusCode + } body: ${JSON.stringify(response.body)}` + ); + } + } else { + targetToExtent = response.body; + } + + if (!targetToExtent) { + throw new GrpcError( + grpc.status.UNKNOWN, + `unknown error creating iscsi targettoextent` + ); + } + this.ctx.logger.verbose( + "FreeNAS ISCSI TARGET_TO_EXTENT: %j", + targetToExtent + ); + + await httpApiClient.DatasetSet(datasetName, { + [FREENAS_ISCSI_TARGETTOEXTENT_ID_PROPERTY_NAME]: + targetToExtent.id, + }); + + break; + } + case 2: + // create target and targetgroup + //let targetId; + let targetGroups = []; + for (let targetGroupConfig of this.options.iscsi.targetGroups) { + targetGroups.push({ + portal: targetGroupConfig.targetGroupPortalGroup, + initiator: targetGroupConfig.targetGroupInitiatorGroup, + auth: + targetGroupConfig.targetGroupAuthGroup > 0 + ? targetGroupConfig.targetGroupAuthGroup + : null, + authmethod: + targetGroupConfig.targetGroupAuthType.length > 0 + ? 
targetGroupConfig.targetGroupAuthType + .toUpperCase() + .replace(" ", "_") + : "NONE", + }); + } + let target = { + name: iscsiName, + alias: null, // cannot send "" error: handler error - driver: FreeNASDriver method: CreateVolume error: {"name":"GrpcError","code":2,"message":"received error creating iscsi target - code: 422 body: {\"iscsi_target_create.alias\":[{\"message\":\"Alias already exists\",\"errno\":22}]}"} + mode: "ISCSI", + groups: targetGroups, + }; + + response = await httpClient.post("/iscsi/target", target); + + // 409 if invalid + if (response.statusCode != 200) { + target = null; + if ( + response.statusCode == 422 && + JSON.stringify(response.body).includes( + "Target name already exists" + ) + ) { + target = await httpApiClient.findResourceByProperties( + "/iscsi/target", + { + name: iscsiName, + } + ); + } else { + throw new GrpcError( + grpc.status.UNKNOWN, + `received error creating iscsi target - code: ${ + response.statusCode + } body: ${JSON.stringify(response.body)}` + ); + } + } else { + target = response.body; + } + + if (!target) { + throw new GrpcError( + grpc.status.UNKNOWN, + `unknown error creating iscsi target` + ); + } + + if (target.name != iscsiName) { + throw new GrpcError( + grpc.status.UNKNOWN, + `mismatch name error creating iscsi target` + ); + } + + // handle situations/race conditions where groups failed to be added/created on the target + // groups":[{"portal":1,"initiator":1,"auth":null,"authmethod":"NONE"},{"portal":2,"initiator":1,"auth":null,"authmethod":"NONE"}] + // TODO: this logic could be more intelligent but this should do for now as it appears in the failure scenario no groups are added + // in other words, I have never seen them invalid, only omitted so this should be enough + if (target.groups.length != targetGroups.length) { + response = await httpClient.put( + `/iscsi/target/id/${target.id}`, + { + groups: targetGroups, + } + ); + + if (response.statusCode != 200) { + throw new GrpcError( + 
grpc.status.UNKNOWN, + `failed setting target groups` + ); + } else { + target = response.body; + + // re-run sanity checks + if (!target) { + throw new GrpcError( + grpc.status.UNKNOWN, + `unknown error creating iscsi target` + ); + } + + if (target.name != iscsiName) { + throw new GrpcError( + grpc.status.UNKNOWN, + `mismatch name error creating iscsi target` + ); + } + + if (target.groups.length != targetGroups.length) { + throw new GrpcError( + grpc.status.UNKNOWN, + `failed setting target groups` + ); + } + } + } + + this.ctx.logger.verbose("FreeNAS ISCSI TARGET: %j", target); + + // set target.id on zvol + await httpApiClient.DatasetSet(datasetName, { + [FREENAS_ISCSI_TARGET_ID_PROPERTY_NAME]: target.id, + }); + + let extent = { + comment: "", // TODO: allow this to be templated + type: "DISK", // Disk/File, after save Disk becomes "ZVOL" + name: iscsiName, + //iscsi_target_extent_naa: "0x3822690834aae6c5", + disk: extentDiskName, + insecure_tpc: extentInsecureTpc, + xen: extentXenCompat, + avail_threshold: extentAvailThreshold, + blocksize: Number(extentBlocksize), + pblocksize: extentDisablePhysicalBlocksize, + rpm: "" + extentRpm, // should be a string + ro: false, + }; + + response = await httpClient.post("/iscsi/extent", extent); + + // 409 if invalid + if (response.statusCode != 200) { + extent = null; + if ( + response.statusCode == 422 && + JSON.stringify(response.body).includes( + "Extent name must be unique" + ) + ) { + extent = await httpApiClient.findResourceByProperties( + "/iscsi/extent", + { + name: iscsiName, + } + ); + } else { + throw new GrpcError( + grpc.status.UNKNOWN, + `received error creating iscsi extent - code: ${ + response.statusCode + } body: ${JSON.stringify(response.body)}` + ); + } + } else { + extent = response.body; + } + + if (!extent) { + throw new GrpcError( + grpc.status.UNKNOWN, + `unknown error creating iscsi extent` + ); + } + + if (extent.name != iscsiName) { + throw new GrpcError( + grpc.status.UNKNOWN, + `mismatch 
name error creating iscsi extent` + ); + } + + this.ctx.logger.verbose("FreeNAS ISCSI EXTENT: %j", extent); + + await httpApiClient.DatasetSet(datasetName, { + [FREENAS_ISCSI_EXTENT_ID_PROPERTY_NAME]: extent.id, + }); + + // create targettoextent + let targetToExtent = { + target: target.id, + extent: extent.id, + lunid: 0, + }; + response = await httpClient.post( + "/iscsi/targetextent", + targetToExtent + ); + + if (response.statusCode != 200) { + targetToExtent = null; + + // LUN ID is already being used for this target. + // Extent is already in this target. + if ( + response.statusCode == 422 && + (JSON.stringify(response.body).includes( + "Extent is already in this target." + ) || + JSON.stringify(response.body).includes( + "LUN ID is already being used for this target." + )) + ) { + targetToExtent = await httpApiClient.findResourceByProperties( + "/iscsi/targetextent", + { + target: target.id, + extent: extent.id, + lunid: 0, + } + ); + } else { + throw new GrpcError( + grpc.status.UNKNOWN, + `received error creating iscsi targetextent - code: ${ + response.statusCode + } body: ${JSON.stringify(response.body)}` + ); + } + } else { + targetToExtent = response.body; + } + + if (!targetToExtent) { + throw new GrpcError( + grpc.status.UNKNOWN, + `unknown error creating iscsi targetextent` + ); + } + this.ctx.logger.verbose( + "FreeNAS ISCSI TARGET_TO_EXTENT: %j", + targetToExtent + ); + + await httpApiClient.DatasetSet(datasetName, { + [FREENAS_ISCSI_TARGETTOEXTENT_ID_PROPERTY_NAME]: + targetToExtent.id, + }); + + break; + default: + throw new GrpcError( + grpc.status.FAILED_PRECONDITION, + `invalid configuration: unknown apiVersion ${apiVersion}` + ); + } + } + + // iqn = target + let iqn = basename + ":" + iscsiName; + this.ctx.logger.info("FreeNAS iqn: " + iqn); + + // store this off to make delete process more bullet proof + await httpApiClient.DatasetSet(datasetName, { + [FREENAS_ISCSI_ASSETS_NAME_PROPERTY_NAME]: iscsiName, + }); + + volume_context = { + 
node_attach_driver: "iscsi", + portal: this.options.iscsi.targetPortal, + portals: this.options.iscsi.targetPortals + ? this.options.iscsi.targetPortals.join(",") + : "", + interface: this.options.iscsi.interface || "", + iqn: iqn, + lun: 0, + }; + return volume_context; + + default: + throw new GrpcError( + grpc.status.FAILED_PRECONDITION, + `invalid configuration: unknown driverShareType ${driverShareType}` + ); + } + } + + async deleteShare(call, datasetName) { + const driverShareType = this.getDriverShareType(); + const httpClient = await this.getHttpClient(); + const httpApiClient = await this.getTrueNASHttpApiClient(); + const apiVersion = httpClient.getApiVersion(); + const zb = await this.getZetabyte(); + + let properties; + let response; + let endpoint; + let shareId; + let deleteAsset; + let sharePaths; + + switch (driverShareType) { + case "nfs": + try { + properties = await httpApiClient.DatasetGet(datasetName, [ + "mountpoint", + FREENAS_NFS_SHARE_PROPERTY_NAME, + ]); + } catch (err) { + if (err.toString().includes("dataset does not exist")) { + return; + } + throw err; + } + this.ctx.logger.debug("zfs props data: %j", properties); + + shareId = properties[FREENAS_NFS_SHARE_PROPERTY_NAME].value; + + // only remove if the process has not succeeded already + if (zb.helpers.isPropertyValueSet(shareId)) { + // remove nfs share + switch (apiVersion) { + case 1: + case 2: + endpoint = "/sharing/nfs/"; + if (apiVersion == 2) { + endpoint += "id/"; + } + endpoint += shareId; + + response = await httpClient.get(endpoint); + + // assume share is gone for now + if ([404, 500].includes(response.statusCode)) { + } else { + switch (apiVersion) { + case 1: + sharePaths = response.body.nfs_paths; + break; + case 2: + sharePaths = response.body.paths; + break; + } + + deleteAsset = sharePaths.some((value) => { + return value == properties.mountpoint.value; + }); + + if (deleteAsset) { + response = await httpClient.delete(endpoint); + + // returns a 500 if does not 
exist + // v1 = 204 + // v2 = 200 + if (![200, 204].includes(response.statusCode)) { + throw new GrpcError( + grpc.status.UNKNOWN, + `received error deleting nfs share - share: ${shareId} code: ${ + response.statusCode + } body: ${JSON.stringify(response.body)}` + ); + } + + // remove property to prevent delete race conditions + // due to id re-use by FreeNAS/TrueNAS + await httpApiClient.DatasetInherit( + datasetName, + FREENAS_NFS_SHARE_PROPERTY_NAME + ); + } + } + break; + default: + throw new GrpcError( + grpc.status.FAILED_PRECONDITION, + `invalid configuration: unknown apiVersion ${apiVersion}` + ); + } + } + break; + case "smb": + try { + properties = await httpApiClient.DatasetGet(datasetName, [ + "mountpoint", + FREENAS_SMB_SHARE_PROPERTY_NAME, + ]); + } catch (err) { + if (err.toString().includes("dataset does not exist")) { + return; + } + throw err; + } + this.ctx.logger.debug("zfs props data: %j", properties); + + shareId = properties[FREENAS_SMB_SHARE_PROPERTY_NAME].value; + + // only remove if the process has not succeeded already + if (zb.helpers.isPropertyValueSet(shareId)) { + // remove smb share + switch (apiVersion) { + case 1: + case 2: + switch (apiVersion) { + case 1: + endpoint = `/sharing/cifs/${shareId}`; + break; + case 2: + endpoint = `/sharing/smb/id/${shareId}`; + break; + } + + response = await httpClient.get(endpoint); + + // assume share is gone for now + if ([404, 500].includes(response.statusCode)) { + } else { + switch (apiVersion) { + case 1: + sharePaths = [response.body.cifs_path]; + break; + case 2: + sharePaths = [response.body.path]; + break; + } + + deleteAsset = sharePaths.some((value) => { + return value == properties.mountpoint.value; + }); + + if (deleteAsset) { + response = await httpClient.delete(endpoint); + + // returns a 500 if does not exist + // v1 = 204 + // v2 = 200 + if (![200, 204].includes(response.statusCode)) { + throw new GrpcError( + grpc.status.UNKNOWN, + `received error deleting smb share - share: 
${shareId} code: ${ + response.statusCode + } body: ${JSON.stringify(response.body)}` + ); + } + + // remove property to prevent delete race conditions + // due to id re-use by FreeNAS/TrueNAS + await zb.zfs.inherit( + datasetName, + FREENAS_SMB_SHARE_PROPERTY_NAME + ); + } + } + break; + default: + throw new GrpcError( + grpc.status.FAILED_PRECONDITION, + `invalid configuration: unknown apiVersion ${apiVersion}` + ); + } + } + break; + case "iscsi": + // Delete target + // NOTE: deletting a target inherently deletes associated targetgroup(s) and targettoextent(s) + + // Delete extent + try { + properties = await httpApiClient.DatasetGet(datasetName, [ + FREENAS_ISCSI_TARGET_ID_PROPERTY_NAME, + FREENAS_ISCSI_EXTENT_ID_PROPERTY_NAME, + FREENAS_ISCSI_TARGETTOEXTENT_ID_PROPERTY_NAME, + FREENAS_ISCSI_ASSETS_NAME_PROPERTY_NAME, + ]); + } catch (err) { + if (err.toString().includes("dataset does not exist")) { + return; + } + throw err; + } + + this.ctx.logger.debug("zfs props data: %j", properties); + + let targetId = properties[FREENAS_ISCSI_TARGET_ID_PROPERTY_NAME].value; + let extentId = properties[FREENAS_ISCSI_EXTENT_ID_PROPERTY_NAME].value; + let iscsiName = + properties[FREENAS_ISCSI_ASSETS_NAME_PROPERTY_NAME].value; + let assetName; + + switch (apiVersion) { + case 1: + case 2: + // only remove if the process has not succeeded already + if (zb.helpers.isPropertyValueSet(targetId)) { + // https://jira.ixsystems.com/browse/NAS-103952 + + // v1 - /services/iscsi/target/{id}/ + // v2 - /iscsi/target/id/{id} + endpoint = ""; + if (apiVersion == 1) { + endpoint += "/services"; + } + endpoint += "/iscsi/target/"; + if (apiVersion == 2) { + endpoint += "id/"; + } + endpoint += targetId; + response = await httpClient.get(endpoint); + + // assume is gone for now + if ([404, 500].includes(response.statusCode)) { + } else { + deleteAsset = true; + assetName = null; + + // checking if set for backwards compatibility + if (zb.helpers.isPropertyValueSet(iscsiName)) { + switch 
(apiVersion) { + case 1: + assetName = response.body.iscsi_target_name; + break; + case 2: + assetName = response.body.name; + break; + } + + if (assetName != iscsiName) { + deleteAsset = false; + } + } + + if (deleteAsset) { + response = await httpClient.delete(endpoint); + if (![200, 204].includes(response.statusCode)) { + throw new GrpcError( + grpc.status.UNKNOWN, + `received error deleting iscsi target - target: ${targetId} code: ${ + response.statusCode + } body: ${JSON.stringify(response.body)}` + ); + } + + // remove property to prevent delete race conditions + // due to id re-use by FreeNAS/TrueNAS + await httpApiClient.DatasetInherit( + datasetName, + FREENAS_ISCSI_TARGET_ID_PROPERTY_NAME + ); + } else { + this.ctx.logger.debug( + "not deleting iscsitarget asset as it appears ID %s has been re-used: zfs name - %s, iscsitarget name - %s", + targetId, + iscsiName, + assetName + ); + } + } + } + + // only remove if the process has not succeeded already + if (zb.helpers.isPropertyValueSet(extentId)) { + // v1 - /services/iscsi/targettoextent/{id}/ + // v2 - /iscsi/targetextent/id/{id} + if (apiVersion == 1) { + endpoint = "/services/iscsi/extent/"; + } else { + endpoint = "/iscsi/extent/id/"; + } + endpoint += extentId; + response = await httpClient.get(endpoint); + + // assume is gone for now + if ([404, 500].includes(response.statusCode)) { + } else { + deleteAsset = true; + assetName = null; + + // checking if set for backwards compatibility + if (zb.helpers.isPropertyValueSet(iscsiName)) { + switch (apiVersion) { + case 1: + assetName = response.body.iscsi_target_extent_name; + break; + case 2: + assetName = response.body.name; + break; + } + + if (assetName != iscsiName) { + deleteAsset = false; + } + } + + if (deleteAsset) { + response = await httpClient.delete(endpoint); + if (![200, 204].includes(response.statusCode)) { + throw new GrpcError( + grpc.status.UNKNOWN, + `received error deleting iscsi extent - extent: ${extentId} code: ${ + 
response.statusCode + } body: ${JSON.stringify(response.body)}` + ); + } + + // remove property to prevent delete race conditions + // due to id re-use by FreeNAS/TrueNAS + await httpApiClient.DatasetInherit( + datasetName, + FREENAS_ISCSI_EXTENT_ID_PROPERTY_NAME + ); + } else { + this.ctx.logger.debug( + "not deleting iscsiextent asset as it appears ID %s has been re-used: zfs name - %s, iscsiextent name - %s", + extentId, + iscsiName, + assetName + ); + } + } + } + break; + default: + throw new GrpcError( + grpc.status.FAILED_PRECONDITION, + `invalid configuration: unknown apiVersion ${apiVersion}` + ); + } + break; + default: + throw new GrpcError( + grpc.status.FAILED_PRECONDITION, + `invalid configuration: unknown driverShareType ${driverShareType}` + ); + } + } + + async expandVolume(call, datasetName) { + const driverShareType = this.getDriverShareType(); + + return; + const sshClient = this.getSshClient(); + + switch (driverShareType) { + case "iscsi": + const isScale = await this.getIsScale(); + let command; + let reload = false; + if (isScale) { + command = sshClient.buildCommand("systemctl", ["reload", "scst"]); + reload = true; + } else { + command = sshClient.buildCommand("/etc/rc.d/ctld", ["reload"]); + reload = true; + } + + if (reload) { + if (this.getSudoEnabled()) { + command = (await this.getSudoPath()) + " " + command; + } + + this.ctx.logger.verbose( + "FreeNAS reloading iscsi daemon: %s", + command + ); + + let response = await sshClient.exec(command); + if (response.code != 0) { + throw new GrpcError( + grpc.status.UNKNOWN, + `error reloading iscsi daemon: ${JSON.stringify(response)}` + ); + } + } + break; + } + } + + /** + * cannot make this a storage class parameter as storage class/etc context is *not* sent + * into various calls such as GetControllerCapabilities etc + */ + getDriverZfsResourceType() { + switch (this.options.driver) { + case "freenas-api-nfs": + case "truenas-api-nfs": + case "freenas-api-smb": + case "truenas-api-smb": + 
return "filesystem"; + case "freenas-api-iscsi": + case "truenas-api-iscsi": + return "volume"; + default: + throw new Error("unknown driver: " + this.ctx.args.driver); + } + } + + getDriverShareType() { + switch (this.options.driver) { + case "freenas-api-nfs": + case "truenas-api-nfs": + return "nfs"; + case "freenas-api-smb": + case "truenas-api-smb": + return "smb"; + case "freenas-api-iscsi": + case "truenas-api-iscsi": + return "iscsi"; + default: + throw new Error("unknown driver: " + this.ctx.args.driver); + } + } + + getDatasetParentName() { + let datasetParentName = this.options.zfs.datasetParentName; + datasetParentName = datasetParentName.replace(/\/$/, ""); + return datasetParentName; + } + + getVolumeParentDatasetName() { + let datasetParentName = this.getDatasetParentName(); + //datasetParentName += "/v"; + datasetParentName = datasetParentName.replace(/\/$/, ""); + return datasetParentName; + } + + getDetachedSnapshotParentDatasetName() { + //let datasetParentName = this.getDatasetParentName(); + let datasetParentName = this.options.zfs.detachedSnapshotsDatasetParentName; + //datasetParentName += "/s"; + datasetParentName = datasetParentName.replace(/\/$/, ""); + return datasetParentName; + } + + async getHttpClient() { + const client = new HttpClient(this.options.httpConnection); + client.logger = this.ctx.logger; + client.setApiVersion(2); // requires version 2 + + return client; + } + + async getTrueNASHttpApiClient() { + const driver = this; + const httpClient = await this.getHttpClient(); + const apiClient = new TrueNASApiClient(httpClient, driver.ctx.cache); + + return apiClient; + } + + assertCapabilities(capabilities) { + const driverZfsResourceType = this.getDriverZfsResourceType(); + this.ctx.logger.verbose("validating capabilities: %j", capabilities); + + let message = null; + //[{"access_mode":{"mode":"SINGLE_NODE_WRITER"},"mount":{"mount_flags":["noatime","_netdev"],"fs_type":"nfs"},"access_type":"mount"}] + const valid = 
capabilities.every((capability) => { + switch (driverZfsResourceType) { + case "filesystem": + if (capability.access_type != "mount") { + message = `invalid access_type ${capability.access_type}`; + return false; + } + + if ( + capability.mount.fs_type && + !["nfs", "cifs"].includes(capability.mount.fs_type) + ) { + message = `invalid fs_type ${capability.mount.fs_type}`; + return false; + } + + if ( + ![ + "UNKNOWN", + "SINGLE_NODE_WRITER", + "SINGLE_NODE_READER_ONLY", + "MULTI_NODE_READER_ONLY", + "MULTI_NODE_SINGLE_WRITER", + "MULTI_NODE_MULTI_WRITER", + ].includes(capability.access_mode.mode) + ) { + message = `invalid access_mode, ${capability.access_mode.mode}`; + return false; + } + + return true; + case "volume": + if (capability.access_type == "mount") { + if ( + capability.mount.fs_type && + !["ext3", "ext4", "ext4dev", "xfs"].includes( + capability.mount.fs_type + ) + ) { + message = `invalid fs_type ${capability.mount.fs_type}`; + return false; + } + } + + if ( + ![ + "UNKNOWN", + "SINGLE_NODE_WRITER", + "SINGLE_NODE_READER_ONLY", + "MULTI_NODE_READER_ONLY", + "MULTI_NODE_SINGLE_WRITER", + ].includes(capability.access_mode.mode) + ) { + message = `invalid access_mode, ${capability.access_mode.mode}`; + return false; + } + + return true; + } + }); + + return { valid, message }; + } + + /** + * Ensure sane options are used etc + * true = ready + * false = not ready, but progressiong towards ready + * throw error = faulty setup + * + * @param {*} call + */ + async Probe(call) { + const driver = this; + + if (driver.ctx.args.csiMode.includes("controller")) { + let datasetParentName = this.getVolumeParentDatasetName() + "/"; + let snapshotParentDatasetName = + this.getDetachedSnapshotParentDatasetName() + "/"; + if ( + datasetParentName.startsWith(snapshotParentDatasetName) || + snapshotParentDatasetName.startsWith(datasetParentName) + ) { + throw new GrpcError( + grpc.status.FAILED_PRECONDITION, + `datasetParentName and detachedSnapshotsDatasetParentName 
must not overlap` + ); + } + return { ready: { value: true } }; + } else { + return { ready: { value: true } }; + } + } + + /** + * Create a volume doing in essence the following: + * 1. create dataset + * 2. create nfs share + * + * Should return 2 parameters + * 1. `server` - host/ip of the nfs server + * 2. `share` - path of the mount shared + * + * @param {*} call + */ + async CreateVolume(call) { + const driver = this; + const driverZfsResourceType = this.getDriverZfsResourceType(); + const httpApiClient = await this.getTrueNASHttpApiClient(); + const zb = await this.getZetabyte(); + + let datasetParentName = this.getVolumeParentDatasetName(); + let snapshotParentDatasetName = this.getDetachedSnapshotParentDatasetName(); + let zvolBlocksize = this.options.zfs.zvolBlocksize || "16K"; + let name = call.request.name; + let volume_content_source = call.request.volume_content_source; + + if (!datasetParentName) { + throw new GrpcError( + grpc.status.FAILED_PRECONDITION, + `invalid configuration: missing datasetParentName` + ); + } + + if (!name) { + throw new GrpcError( + grpc.status.INVALID_ARGUMENT, + `volume name is required` + ); + } + + if (call.request.volume_capabilities) { + const result = this.assertCapabilities(call.request.volume_capabilities); + if (result.valid !== true) { + throw new GrpcError(grpc.status.INVALID_ARGUMENT, result.message); + } + } + + if ( + call.request.capacity_range.required_bytes > 0 && + call.request.capacity_range.limit_bytes > 0 && + call.request.capacity_range.required_bytes > + call.request.capacity_range.limit_bytes + ) { + throw new GrpcError( + grpc.status.OUT_OF_RANGE, + `required_bytes is greather than limit_bytes` + ); + } + + /** + * NOTE: avoid the urge to templatize this given the name length limits for zvols + * ie: namespace-name may quite easily exceed 58 chars + */ + const datasetName = datasetParentName + "/" + name; + let capacity_bytes = + call.request.capacity_range.required_bytes || + 
call.request.capacity_range.limit_bytes; + + if (capacity_bytes && driverZfsResourceType == "volume") { + //make sure to align capacity_bytes with zvol blocksize + //volume size must be a multiple of volume block size + capacity_bytes = zb.helpers.generateZvolSize( + capacity_bytes, + zvolBlocksize + ); + } + if (!capacity_bytes) { + //should never happen, value must be set + throw new GrpcError( + grpc.status.INVALID_ARGUMENT, + `volume capacity is required (either required_bytes or limit_bytes)` + ); + } + + // ensure *actual* capacity is not greater than limit + if ( + call.request.capacity_range.limit_bytes && + call.request.capacity_range.limit_bytes > 0 && + capacity_bytes > call.request.capacity_range.limit_bytes + ) { + throw new GrpcError( + grpc.status.OUT_OF_RANGE, + `required volume capacity is greater than limit` + ); + } + + /** + * This is specifically a FreeBSD limitation, not sure what linux limit is + * https://www.ixsystems.com/documentation/freenas/11.2-U5/storage.html#zfs-zvol-config-opts-tab + * https://www.ixsystems.com/documentation/freenas/11.3-BETA1/intro.html#path-and-name-lengths + * https://www.freebsd.org/cgi/man.cgi?query=devfs + */ + if (driverZfsResourceType == "volume") { + let extentDiskName = "zvol/" + datasetName; + if (extentDiskName.length > 63) { + throw new GrpcError( + grpc.status.FAILED_PRECONDITION, + `extent disk name cannot exceed 63 characters: ${extentDiskName}` + ); + } + } + + let response, command; + let volume_content_source_snapshot_id; + let volume_content_source_volume_id; + let fullSnapshotName; + let volumeProperties = {}; + + // user-supplied properties + // put early to prevent stupid (user-supplied values overwriting system values) + if (driver.options.zfs.datasetProperties) { + for (let property in driver.options.zfs.datasetProperties) { + let value = driver.options.zfs.datasetProperties[property]; + const template = Handlebars.compile(value); + + volumeProperties[property] = template({ + parameters: 
call.request.parameters, + }); + } + } + + volumeProperties[VOLUME_CSI_NAME_PROPERTY_NAME] = name; + volumeProperties[MANAGED_PROPERTY_NAME] = "true"; + volumeProperties[VOLUME_CONTEXT_PROVISIONER_DRIVER_PROPERTY_NAME] = + driver.options.driver; + if (driver.options.instance_id) { + volumeProperties[VOLUME_CONTEXT_PROVISIONER_INSTANCE_ID_PROPERTY_NAME] = + driver.options.instance_id; + } + + // TODO: also set access_mode as property? + // TODO: also set fsType as property? + + // zvol enables reservation by default + // this implements 'sparse' zvols + if (driverZfsResourceType == "volume") { + if (!this.options.zfs.zvolEnableReservation) { + volumeProperties.refreservation = 0; + } + } + + let detachedClone = false; + + // create dataset + if (volume_content_source) { + volumeProperties[VOLUME_CONTENT_SOURCE_TYPE_PROPERTY_NAME] = + volume_content_source.type; + switch (volume_content_source.type) { + // must be available when adverstising CREATE_DELETE_SNAPSHOT + // simply clone + case "snapshot": + try { + let tmpDetachedClone = JSON.parse( + driver.getNormalizedParameterValue( + call.request.parameters, + "detachedVolumesFromSnapshots" + ) + ); + if (typeof tmpDetachedClone === "boolean") { + detachedClone = tmpDetachedClone; + } + } catch (e) {} + + volumeProperties[VOLUME_CONTENT_SOURCE_ID_PROPERTY_NAME] = + volume_content_source.snapshot.snapshot_id; + volume_content_source_snapshot_id = + volume_content_source.snapshot.snapshot_id; + + // zfs origin property contains parent info, ie: pool0/k8s/test/PVC-111@clone-test + if (zb.helpers.isZfsSnapshot(volume_content_source_snapshot_id)) { + fullSnapshotName = + datasetParentName + "/" + volume_content_source_snapshot_id; + } else { + fullSnapshotName = + snapshotParentDatasetName + + "/" + + volume_content_source_snapshot_id + + "@" + + VOLUME_SOURCE_CLONE_SNAPSHOT_PREFIX + + name; + } + + driver.ctx.logger.debug("full snapshot name: %s", fullSnapshotName); + + if 
(!zb.helpers.isZfsSnapshot(volume_content_source_snapshot_id)) { + try { + await httpApiClient.SnapshotCreate(fullSnapshotName); + } catch (err) { + if (err.toString().includes("dataset does not exist")) { + throw new GrpcError( + grpc.status.FAILED_PRECONDITION, + `snapshot source_snapshot_id ${volume_content_source_snapshot_id} does not exist` + ); + } + + throw err; + } + } + + if (detachedClone) { + try { + // TODO: fix this + response = await zb.zfs.send_receive( + fullSnapshotName, + [], + datasetName, + [] + ); + + response = await httpApiClient.DatasetSet(datasetName, volumeProperties); + } catch (err) { + if ( + err.toString().includes("destination") && + err.toString().includes("exists") + ) { + // move along + } else { + throw err; + } + } + + // remove snapshots from target + await this.removeSnapshotsFromDatatset(datasetName, { + force: true, + }); + } else { + try { + response = await httpApiClient.CloneCreate( + fullSnapshotName, + datasetName, + { + dataset_properties: volumeProperties, + } + ); + } catch (err) { + if (err.toString().includes("dataset does not exist")) { + throw new GrpcError( + grpc.status.FAILED_PRECONDITION, + "dataset does not exists" + ); + } + + throw err; + } + } + + if (!zb.helpers.isZfsSnapshot(volume_content_source_snapshot_id)) { + try { + // schedule snapshot removal from source + await httpApiClient.SnapshotDelete(fullSnapshotName, { + defer: true, + }); + } catch (err) { + if (err.toString().includes("dataset does not exist")) { + throw new GrpcError( + grpc.status.FAILED_PRECONDITION, + `snapshot source_snapshot_id ${volume_content_source_snapshot_id} does not exist` + ); + } + + throw err; + } + } + + break; + // must be available when adverstising CLONE_VOLUME + // create snapshot first, then clone + case "volume": + try { + let tmpDetachedClone = JSON.parse( + driver.getNormalizedParameterValue( + call.request.parameters, + "detachedVolumesFromVolumes" + ) + ); + if (typeof tmpDetachedClone === "boolean") { + 
detachedClone = tmpDetachedClone; + } + } catch (e) {} + + volumeProperties[VOLUME_CONTENT_SOURCE_ID_PROPERTY_NAME] = + volume_content_source.volume.volume_id; + volume_content_source_volume_id = + volume_content_source.volume.volume_id; + + fullSnapshotName = + datasetParentName + + "/" + + volume_content_source_volume_id + + "@" + + VOLUME_SOURCE_CLONE_SNAPSHOT_PREFIX + + name; + + driver.ctx.logger.debug("full snapshot name: %s", fullSnapshotName); + + // create snapshot + try { + response = await httpApiClient.SnapshotCreate(fullSnapshotName); + } catch (err) { + if (err.toString().includes("dataset does not exist")) { + throw new GrpcError( + grpc.status.FAILED_PRECONDITION, + "dataset does not exists" + ); + } + + throw err; + } + + if (detachedClone) { + try { + // TODO: fix this + response = await zb.zfs.send_receive( + fullSnapshotName, + [], + datasetName, + [] + ); + } catch (err) { + if ( + err.toString().includes("destination") && + err.toString().includes("exists") + ) { + // move along + } else { + throw err; + } + } + + response = await httpApiClient.DatasetSet(datasetName, volumeProperties); + + // remove snapshots from target + await this.removeSnapshotsFromDatatset(datasetName, { + force: true, + }); + + // remove snapshot from source + await httpApiClient.SnapshotDelete(fullSnapshotName, { + defer: true, + }); + } else { + // create clone + // zfs origin property contains parent info, ie: pool0/k8s/test/PVC-111@clone-test + try { + response = await httpApiClient.CloneCreate(fullSnapshotName, datasetName, { + dataset_properties: volumeProperties, + }); + } catch (err) { + if (err.toString().includes("dataset does not exist")) { + throw new GrpcError( + grpc.status.FAILED_PRECONDITION, + "dataset does not exists" + ); + } + + throw err; + } + } + break; + default: + throw new GrpcError( + grpc.status.INVALID_ARGUMENT, + `invalid volume_content_source type: ${volume_content_source.type}` + ); + break; + } + } else { + // force blocksize on newly 
created zvols + if (driverZfsResourceType == "volume") { + volumeProperties.volblocksize = zvolBlocksize; + } + + await httpApiClient.DatasetCreate(datasetName, { + ...httpApiClient.getSystemProperties(volumeProperties), + type: driverZfsResourceType.toUpperCase(), + volsize: driverZfsResourceType == "volume" ? capacity_bytes : undefined, + create_ancestors: true, + user_properties: httpApiClient.getPropertiesKeyValueArray( + httpApiClient.getUserProperties(volumeProperties) + ), + }); + } + + let setProps = false; + let setPerms = false; + let properties = {}; + let volume_context = {}; + + switch (driverZfsResourceType) { + case "filesystem": + // set quota + if (this.options.zfs.datasetEnableQuotas) { + setProps = true; + properties.refquota = capacity_bytes; + } + + // set reserve + if (this.options.zfs.datasetEnableReservation) { + setProps = true; + properties.refreservation = capacity_bytes; + } + + // quota for dataset and all children + // reserved for dataset and all children + + // dedup + // ro? 
+ // record size + + // set properties + if (setProps) { + await httpApiClient.DatasetSet(datasetName, properties); + } + + //datasetPermissionsMode: 0777, + //datasetPermissionsUser: "root", + //datasetPermissionsGroup: "wheel", + + // get properties needed for remaining calls + properties = await httpApiClient.DatasetGet(datasetName, [ + "mountpoint", + "refquota", + "compression", + VOLUME_CSI_NAME_PROPERTY_NAME, + VOLUME_CONTENT_SOURCE_TYPE_PROPERTY_NAME, + VOLUME_CONTENT_SOURCE_ID_PROPERTY_NAME, + ]); + driver.ctx.logger.debug("zfs props data: %j", properties); + + // set mode + let perms = { + path: properties.mountpoint.value, + }; + if (this.options.zfs.datasetPermissionsMode) { + setPerms = true; + perms.mode = this.options.zfs.datasetPermissionsMode; + } + + // set ownership + if ( + this.options.zfs.hasOwnProperty("datasetPermissionsUser") || + this.options.zfs.hasOwnProperty("datasetPermissionsGroup") + ) { + // TODO: ensure the values are numbers and not strings + setPerms = true; + perms.uid = Number(this.options.zfs.datasetPermissionsUser); + perms.gid = Number(this.options.zfs.datasetPermissionsGroup); + } + + if (setPerms) { + await httpApiClient.FilesystemSetperm(perms); + } + + // set acls + // TODO: this is unsfafe approach, make it better + // probably could see if ^-.*\s and split and then shell escape + if (this.options.zfs.datasetPermissionsAcls) { + for (const acl of this.options.zfs.datasetPermissionsAcls) { + perms = { + path: properties.mountpoint.value, + dacl: acl, + }; + // TODO: FilesystemSetacl? 
+ } + } + + break; + case "volume": + // set properties + // set reserve + setProps = true; + + // this should be already set, but when coming from a volume source + // it may not match that of the source + // TODO: probably need to recalculate size based on *actual* volume source blocksize in case of difference from currently configured + properties.volsize = capacity_bytes; + + //dedup + //compression + + if (setProps) { + await httpApiClient.DatasetSet(datasetName, properties); + } + + break; + } + + volume_context = await this.createShare(call, datasetName); + await httpApiClient.DatasetSet(datasetName, { + [SHARE_VOLUME_CONTEXT_PROPERTY_NAME]: JSON.stringify(volume_context), + }); + + volume_context["provisioner_driver"] = driver.options.driver; + if (driver.options.instance_id) { + volume_context["provisioner_driver_instance_id"] = + driver.options.instance_id; + } + + // set this just before sending out response so we know if volume completed + // this should give us a relatively sane way to clean up artifacts over time + await httpApiClient.DatasetSet(datasetName, { + [SUCCESS_PROPERTY_NAME]: "true", + }); + + const res = { + volume: { + volume_id: name, + //capacity_bytes: capacity_bytes, // kubernetes currently pukes if capacity is returned as 0 + capacity_bytes: + this.options.zfs.datasetEnableQuotas || + driverZfsResourceType == "volume" + ? capacity_bytes + : 0, + content_source: volume_content_source, + volume_context, + }, + }; + + return res; + } + + /** + * Delete a volume + * + * Deleting a volume consists of the following steps: + * 1. delete the nfs share + * 2. 
delete the dataset + * + * @param {*} call + */ + async DeleteVolume(call) { + const driver = this; + const httpApiClient = await this.getTrueNASHttpApiClient(); + const zb = await this.getZetabyte(); + + let datasetParentName = this.getVolumeParentDatasetName(); + let name = call.request.volume_id; + + if (!datasetParentName) { + throw new GrpcError( + grpc.status.FAILED_PRECONDITION, + `invalid configuration: missing datasetParentName` + ); + } + + if (!name) { + throw new GrpcError( + grpc.status.INVALID_ARGUMENT, + `volume_id is required` + ); + } + + const datasetName = datasetParentName + "/" + name; + let properties; + + // get properties needed for remaining calls + try { + properties = await httpApiClient.DatasetGet(datasetName, [ + "mountpoint", + "origin", + "refquota", + "compression", + VOLUME_CSI_NAME_PROPERTY_NAME, + ]); + } catch (err) { + let ignore = false; + if (err.toString().includes("dataset does not exist")) { + ignore = true; + } + + if (!ignore) { + throw err; + } + } + + driver.ctx.logger.debug("dataset properties: %j", properties); + + // remove share resources + await this.deleteShare(call, datasetName); + + // remove parent snapshot if appropriate with defer + if ( + properties && + properties.origin && + properties.origin.value != "-" && + zb.helpers + .extractSnapshotName(properties.origin.value) + .startsWith(VOLUME_SOURCE_CLONE_SNAPSHOT_PREFIX) + ) { + driver.ctx.logger.debug( + "removing with defer source snapshot: %s", + properties.origin.value + ); + + try { + await zb.zfs.destroy(properties.origin.value, { + recurse: true, + force: true, + defer: true, + }); + } catch (err) { + if (err.toString().includes("snapshot has dependent clones")) { + throw new GrpcError( + grpc.status.FAILED_PRECONDITION, + "snapshot has dependent clones" + ); + } + throw err; + } + } + + // NOTE: -f does NOT allow deletes if dependent filesets exist + // NOTE: -R will recursively delete items + dependent filesets + // delete dataset + try { + await 
httpApiClient.DatasetDelete(datasetName, { + recursive: true, + force: true, + }); + } catch (err) { + if (err.toString().includes("filesystem has dependent clones")) { + throw new GrpcError( + grpc.status.FAILED_PRECONDITION, + "filesystem has dependent clones" + ); + } + + throw err; + } + + return {}; + } + + /** + * + * @param {*} call + */ + async CreateSnapshot(call) { + const driver = this; + const driverZfsResourceType = this.getDriverZfsResourceType(); + const httpApiClient = await this.getTrueNASHttpApiClient(); + const zb = await this.getZetabyte(); + + let detachedSnapshot = false; + try { + let tmpDetachedSnapshot = JSON.parse( + driver.getNormalizedParameterValue( + call.request.parameters, + "detachedSnapshots" + ) + ); // snapshot class parameter + if (typeof tmpDetachedSnapshot === "boolean") { + detachedSnapshot = tmpDetachedSnapshot; + } + } catch (e) {} + + let response; + const volumeParentDatasetName = this.getVolumeParentDatasetName(); + let datasetParentName; + let snapshotProperties = {}; + let types = []; + + if (detachedSnapshot) { + datasetParentName = this.getDetachedSnapshotParentDatasetName(); + if (driverZfsResourceType == "filesystem") { + types.push("filesystem"); + } else { + types.push("volume"); + } + } else { + datasetParentName = this.getVolumeParentDatasetName(); + types.push("snapshot"); + } + + if (!datasetParentName) { + throw new GrpcError( + grpc.status.FAILED_PRECONDITION, + `invalid configuration: missing datasetParentName` + ); + } + + // both these are required + let source_volume_id = call.request.source_volume_id; + let name = call.request.name; + + if (!source_volume_id) { + throw new GrpcError( + grpc.status.INVALID_ARGUMENT, + `snapshot source_volume_id is required` + ); + } + + if (!name) { + throw new GrpcError( + grpc.status.INVALID_ARGUMENT, + `snapshot name is required` + ); + } + + const datasetName = datasetParentName + "/" + source_volume_id; + snapshotProperties[SNAPSHOT_CSI_NAME_PROPERTY_NAME] = name; 
+ snapshotProperties[SNAPSHOT_CSI_SOURCE_VOLUME_ID_PROPERTY_NAME] = + source_volume_id; + snapshotProperties[MANAGED_PROPERTY_NAME] = "true"; + + driver.ctx.logger.verbose("requested snapshot name: %s", name); + + let invalid_chars; + invalid_chars = name.match(/[^a-z0-9_\-:.+]+/gi); + if (invalid_chars) { + invalid_chars = String.prototype.concat( + ...new Set(invalid_chars.join("")) + ); + throw new GrpcError( + grpc.status.INVALID_ARGUMENT, + `snapshot name contains invalid characters: ${invalid_chars}` + ); + } + + // https://stackoverflow.com/questions/32106243/regex-to-remove-all-non-alpha-numeric-and-replace-spaces-with/32106277 + name = name.replace(/[^a-z0-9_\-:.+]+/gi, ""); + + driver.ctx.logger.verbose("cleansed snapshot name: %s", name); + + let fullSnapshotName; + let snapshotDatasetName; + let tmpSnapshotName; + if (detachedSnapshot) { + fullSnapshotName = datasetName + "/" + name; + } else { + fullSnapshotName = datasetName + "@" + name; + } + + driver.ctx.logger.verbose("full snapshot name: %s", fullSnapshotName); + + if (detachedSnapshot) { + tmpSnapshotName = + volumeParentDatasetName + + "/" + + source_volume_id + + "@" + + VOLUME_SOURCE_DETACHED_SNAPSHOT_PREFIX + + name; + snapshotDatasetName = datasetName + "/" + name; + + await httpApiClient.DatasetCreate(datasetName, { + create_ancestors: true, + }); + + try { + await httpApiClient.SnapshotCreate(tmpSnapshotName); + } catch (err) { + if (err.toString().includes("dataset does not exist")) { + throw new GrpcError( + grpc.status.FAILED_PRECONDITION, + `snapshot source_volume_id ${source_volume_id} does not exist` + ); + } + + throw err; + } + + try { + //TODO: get the value from the response and wait for the job to finish + response = await httpApiClient.ReplicationRunOnetime({ + direction: "PUSH", + transport: "LOCAL", + source_datasets: [tmpSnapshotName], + target_dataset: snapshotDatasetName, + recursive: false, + retention_policy: null, + }); + + response = await httpApiClient.DatasetSet( + 
snapshotDatasetName, + snapshotProperties + ); + } catch (err) { + if ( + err.toString().includes("destination") && + err.toString().includes("exists") + ) { + // move along + } else { + throw err; + } + } + + // remove snapshot from target + await httpApiClient.SnapshotDelete( + snapshotDatasetName + + "@" + + zb.helpers.extractSnapshotName(tmpSnapshotName), + { + //recurse: true, + //force: true, + defer: true, + } + ); + + // remove snapshot from source + await httpApiClient.SnapshotDelete(tmpSnapshotName, { + //recurse: true, + //force: true, + defer: true, + }); + } else { + try { + await httpApiClient.SnapshotCreate(fullSnapshotName, { + properties: snapshotProperties, + }); + } catch (err) { + if (err.toString().includes("dataset does not exist")) { + throw new GrpcError( + grpc.status.FAILED_PRECONDITION, + `snapshot source_volume_id ${source_volume_id} does not exist` + ); + } + + throw err; + } + } + + let properties; + let fetchProperties = [ + "name", + "creation", + "mountpoint", + "refquota", + "avail", + "used", + VOLUME_CSI_NAME_PROPERTY_NAME, + SNAPSHOT_CSI_NAME_PROPERTY_NAME, + SNAPSHOT_CSI_SOURCE_VOLUME_ID_PROPERTY_NAME, + MANAGED_PROPERTY_NAME, + ]; + + if (detachedSnapshot) { + properties = await httpApiClient.DatasetGet( + fullSnapshotName, + fetchProperties + ); + } else { + properties = await httpApiClient.SnapshotGet( + fullSnapshotName, + fetchProperties + ); + } + + driver.ctx.logger.verbose("snapshot properties: %j", properties); + + // set this just before sending out response so we know if volume completed + // this should give us a relatively sane way to clean up artifacts over time + //await zb.zfs.set(fullSnapshotName, { [SUCCESS_PROPERTY_NAME]: "true" }); + + return { + snapshot: { + /** + * The purpose of this field is to give CO guidance on how much space + * is needed to create a volume from this snapshot. + * + * In that vein, I think it's best to return 0 here given the + * unknowns of 'cow' implications. 
+ */ + size_bytes: 0, + + // remove parent dataset details + snapshot_id: properties.name.value.replace( + new RegExp("^" + datasetParentName + "/"), + "" + ), + source_volume_id: source_volume_id, + //https://github.com/protocolbuffers/protobuf/blob/master/src/google/protobuf/timestamp.proto + creation_time: { + seconds: properties.creation.rawvalue, + nanos: 0, + }, + ready_to_use: true, + }, + }; + } + + /** + * In addition, if clones have been created from a snapshot, then they must + * be destroyed before the snapshot can be destroyed. + * + * @param {*} call + */ + async DeleteSnapshot(call) { + const driver = this; + const httpApiClient = await this.getTrueNASHttpApiClient(); + const zb = await this.getZetabyte(); + + const snapshot_id = call.request.snapshot_id; + + if (!snapshot_id) { + throw new GrpcError( + grpc.status.INVALID_ARGUMENT, + `snapshot_id is required` + ); + } + + const detachedSnapshot = !zb.helpers.isZfsSnapshot(snapshot_id); + let datasetParentName; + + if (detachedSnapshot) { + datasetParentName = this.getDetachedSnapshotParentDatasetName(); + } else { + datasetParentName = this.getVolumeParentDatasetName(); + } + + if (!datasetParentName) { + throw new GrpcError( + grpc.status.FAILED_PRECONDITION, + `invalid configuration: missing datasetParentName` + ); + } + + const fullSnapshotName = datasetParentName + "/" + snapshot_id; + + driver.ctx.logger.verbose("deleting snapshot: %s", fullSnapshotName); + + if (detachedSnapshot) { + try { + await httpApiClient.DatasetDelete(fullSnapshotName, { + recursive: true, + force: true, + }); + } catch (err) { + throw err; + } + } else { + try { + await httpApiClient.SnapshotDelete(fullSnapshotName, { + //recurse: true, + //force: true, + defer: true, + }); + } catch (err) { + if (err.toString().includes("snapshot has dependent clones")) { + throw new GrpcError( + grpc.status.FAILED_PRECONDITION, + "snapshot has dependent clones" + ); + } + throw err; + } + } + + // cleanup parent dataset if possible + 
if (detachedSnapshot) { + let containerDataset = + zb.helpers.extractParentDatasetName(fullSnapshotName); + try { + //await this.removeSnapshotsFromDatatset(containerDataset); + await httpApiClient.DatasetDelete(containerDataset); + } catch (err) { + if (!err.toString().includes("filesystem has children")) { + throw err; + } + } + } + + return {}; + } + + /** + * + * @param {*} call + */ + async ValidateVolumeCapabilities(call) { + const driver = this; + const result = this.assertCapabilities(call.request.volume_capabilities); + + if (result.valid !== true) { + return { message: result.message }; + } + + return { + confirmed: { + volume_context: call.request.volume_context, + volume_capabilities: call.request.volume_capabilities, // TODO: this is a bit crude, should return *ALL* capabilities, not just what was requested + parameters: call.request.parameters, + }, + }; + } +} + +module.exports.FreeNASApiDriver = FreeNASApiDriver; diff --git a/src/driver/freenas/http/api.js b/src/driver/freenas/http/api.js new file mode 100644 index 0000000..d9a4881 --- /dev/null +++ b/src/driver/freenas/http/api.js @@ -0,0 +1,705 @@ +const { Zetabyte } = require("../../../utils/zfs"); + +// used for in-memory cache of the version info +const FREENAS_SYSTEM_VERSION_CACHE_KEY = "freenas:system_version"; + +class Api { + constructor(client, cache, options = {}) { + this.client = client; + this.cache = cache; + this.options = options; + } + + async getHttpClient() { + return this.client; + } + + /** + * only here for the helpers + * @returns + */ + async getZetabyte() { + return new Zetabyte({ + executor: { + spawn: function () { + throw new Error( + "cannot use the zb implementation to execute zfs commands, must use the http api" + ); + }, + }, + }); + } + + async findResourceByProperties(endpoint, match) { + if (!match) { + return; + } + + if (typeof match === "object" && Object.keys(match).length < 1) { + return; + } + + const httpClient = await this.getHttpClient(); + let target; + 
let page = 0; + let lastReponse; + + // loop and find target + let queryParams = {}; + queryParams.limit = 100; + queryParams.offset = 0; + + while (!target) { + //Content-Range: items 0-2/3 (full set) + //Content-Range: items 0--1/3 (invalid offset) + if (queryParams.hasOwnProperty("offset")) { + queryParams.offset = queryParams.limit * page; + } + + // crude stoppage attempt + let response = await httpClient.get(endpoint, queryParams); + if (lastReponse) { + if (JSON.stringify(lastReponse) == JSON.stringify(response)) { + break; + } + } + lastReponse = response; + + if (response.statusCode == 200) { + if (response.body.length < 1) { + break; + } + response.body.some((i) => { + let isMatch = true; + + if (typeof match === "function") { + isMatch = match(i); + } else { + for (let property in match) { + if (match[property] != i[property]) { + isMatch = false; + break; + } + } + } + + if (isMatch) { + target = i; + return true; + } + + return false; + }); + } else { + throw new Error( + "FreeNAS http error - code: " + + response.statusCode + + " body: " + + JSON.stringify(response.body) + ); + } + page++; + } + + return target; + } + + async getApiVersion() { + const systemVersion = await this.getSystemVersion(); + + if (systemVersion.v2) { + if ((await this.getSystemVersionMajorMinor()) == 11.2) { + return 1; + } + return 2; + } + + return 1; + } + + async getIsFreeNAS() { + const systemVersion = await this.getSystemVersion(); + let version; + + if (systemVersion.v2) { + version = systemVersion.v2; + } else { + version = systemVersion.v1.fullversion; + } + + if (version.toLowerCase().includes("freenas")) { + return true; + } + + return false; + } + + async getIsTrueNAS() { + const systemVersion = await this.getSystemVersion(); + let version; + + if (systemVersion.v2) { + version = systemVersion.v2; + } else { + version = systemVersion.v1.fullversion; + } + + if (version.toLowerCase().includes("truenas")) { + return true; + } + + return false; + } + + async 
getIsScale() { + const systemVersion = await this.getSystemVersion(); + + if (systemVersion.v2 && systemVersion.v2.toLowerCase().includes("scale")) { + return true; + } + + return false; + } + + async getSystemVersionMajorMinor() { + const systemVersion = await this.getSystemVersion(); + let parts; + let parts_i; + let version; + + /* + systemVersion.v2 = "FreeNAS-11.2-U5"; + systemVersion.v2 = "TrueNAS-SCALE-20.11-MASTER-20201127-092915"; + systemVersion.v1 = { + fullversion: "FreeNAS-9.3-STABLE-201503200528", + fullversion: "FreeNAS-11.2-U5 (c129415c52)", + }; + + systemVersion.v2 = null; + */ + + if (systemVersion.v2) { + version = systemVersion.v2; + } else { + version = systemVersion.v1.fullversion; + } + + if (version) { + parts = version.split("-"); + parts_i = []; + parts.forEach((value) => { + let i = value.replace(/[^\d.]/g, ""); + if (i.length > 0) { + parts_i.push(i); + } + }); + + // join and resplit to deal with single elements which contain a decimal + parts_i = parts_i.join(".").split("."); + parts_i.splice(2); + return parts_i.join("."); + } + } + + async getSystemVersionMajor() { + const majorMinor = await this.getSystemVersionMajorMinor(); + return majorMinor.split(".")[0]; + } + + async setVersionInfoCache(versionInfo) { + await this.cache.set( + FREENAS_SYSTEM_VERSION_CACHE_KEY, + versionInfo, + 60 * 1000 + ); + } + + async getSystemVersion() { + let cacheData = await this.cache.get(FREENAS_SYSTEM_VERSION_CACHE_KEY); + + if (cacheData) { + return cacheData; + } + + const httpClient = await this.getHttpClient(false); + const endpoint = "/system/version/"; + let response; + const startApiVersion = httpClient.getApiVersion(); + const versionInfo = {}; + const versionErrors = {}; + const versionResponses = {}; + + httpClient.setApiVersion(2); + /** + * FreeNAS-11.2-U5 + * TrueNAS-12.0-RELEASE + * TrueNAS-SCALE-20.11-MASTER-20201127-092915 + */ + try { + response = await httpClient.get(endpoint); + versionResponses.v2 = response; + if 
(response.statusCode == 200) { + versionInfo.v2 = response.body; + + // return immediately to save on resources and silly requests + await this.setVersionInfoCache(versionInfo); + + // reset apiVersion + httpClient.setApiVersion(startApiVersion); + + return versionInfo; + } + } catch (e) { + // if more info is needed use e.stack + versionErrors.v2 = e.toString(); + } + + httpClient.setApiVersion(1); + /** + * {"fullversion": "FreeNAS-9.3-STABLE-201503200528", "name": "FreeNAS", "version": "9.3"} + * {"fullversion": "FreeNAS-11.2-U5 (c129415c52)", "name": "FreeNAS", "version": ""} + */ + try { + response = await httpClient.get(endpoint); + versionResponses.v1 = response; + if (response.statusCode == 200 && IsJsonString(response.body)) { + versionInfo.v1 = response.body; + await this.setVersionInfoCache(versionInfo); + + // reset apiVersion + httpClient.setApiVersion(startApiVersion); + + return versionInfo; + } + } catch (e) { + // if more info is needed use e.stack + versionErrors.v1 = e.toString(); + } + + // throw error if cannot get v1 or v2 data + // likely bad creds/url + throw new GrpcError( + grpc.status.UNKNOWN, + `FreeNAS error getting system version info: ${JSON.stringify({ + errors: versionErrors, + responses: versionResponses, + })}` + ); + } + + getIsUserProperty(property) { + if (property.includes(":")) { + return true; + } + return false; + } + + getUserProperties(properties) { + let user_properties = {}; + for (const property in properties) { + if (this.getIsUserProperty(property)) { + user_properties[property] = properties[property]; + } + } + + return user_properties; + } + + getSystemProperties(properties) { + let system_properties = {}; + for (const property in properties) { + if (!this.getIsUserProperty(property)) { + system_properties[property] = properties[property]; + } + } + + return system_properties; + } + + getPropertiesKeyValueArray(properties) { + let arr = []; + for (const property in properties) { + arr.push({ key: property, value: 
properties[property] }); + } + + return arr; + } + + async DatasetCreate(datasetName, data) { + const httpClient = await this.getHttpClient(false); + let response; + let endpoint; + + data.name = datasetName; + + endpoint = "/pool/dataset"; + response = await httpClient.post(endpoint, data); + + if (response.statusCode == 200) { + return; + } + + if ( + response.statusCode == 422 && + JSON.stringify(response.body).includes("already exists") + ) { + return; + } + + throw new Error(JSON.stringify(response.body)); + } + + /** + * + * @param {*} datasetName + * @param {*} data + * @returns + */ + async DatasetDelete(datasetName, data) { + const httpClient = await this.getHttpClient(false); + let response; + let endpoint; + + endpoint = `/pool/dataset/id/${encodeURIComponent(datasetName)}`; + response = await httpClient.delete(endpoint, data); + + if (response.statusCode == 200) { + return; + } + + if ( + response.statusCode == 422 && + JSON.stringify(response.body).includes("does not exist") + ) { + return; + } + + throw new Error(JSON.stringify(response.body)); + } + + async DatasetSet(datasetName, properties) { + const httpClient = await this.getHttpClient(false); + let response; + let endpoint; + + endpoint = `/pool/dataset/id/${encodeURIComponent(datasetName)}`; + response = await httpClient.put(endpoint, { + ...this.getSystemProperties(properties), + user_properties_update: this.getPropertiesKeyValueArray( + this.getUserProperties(properties) + ), + }); + + if (response.statusCode == 200) { + return; + } + + throw new Error(JSON.stringify(response.body)); + } + + async DatasetInherit(datasetName, property) { + const httpClient = await this.getHttpClient(false); + let response; + let endpoint; + let system_properties = {}; + let user_properties_update = []; + + const isUserProperty = this.getIsUserProperty(property); + if (isUserProperty) { + user_properties_update = [{ key: property, remove: true }]; + } else { + system_properties[property] = "INHERIT"; + } + + 
endpoint = `/pool/dataset/id/${encodeURIComponent(datasetName)}`; + response = await httpClient.put(endpoint, { + ...system_properties, + user_properties_update, + }); + + if (response.statusCode == 200) { + return; + } + + throw new Error(JSON.stringify(response.body)); + } + + /** + * + * zfs get -Hp all tank/k8s/test/PVC-111 + * + * @param {*} datasetName + * @param {*} properties + * @returns + */ + async DatasetGet(datasetName, properties) { + const httpClient = await this.getHttpClient(false); + let response; + let endpoint; + + endpoint = `/pool/dataset/id/${encodeURIComponent(datasetName)}`; + response = await httpClient.get(endpoint); + + if (response.statusCode == 200) { + let res = {}; + for (const property of properties) { + let p; + if (response.body.hasOwnProperty(property)) { + p = response.body[property]; + } else if (response.body.user_properties.hasOwnProperty(property)) { + p = response.body.user_properties[property]; + } else { + p = { + value: "-", + rawvalue: "-", + source: "-", + }; + } + + if (typeof p === "object" && p !== null) { + // nothing, leave as is + } else { + p = { + value: p, + rawvalue: p, + }; + } + + res[property] = p; + } + + return res; + } + + if (response.statusCode == 404) { + throw new Error("dataset does not exist"); + } + + throw new Error(JSON.stringify(response.body)); + } + + /** + * + * zfs get -Hp all tank/k8s/test/PVC-111 + * + * @param {*} snapshotName + * @param {*} properties + * @returns + */ + async SnapshotGet(snapshotName, properties) { + const httpClient = await this.getHttpClient(false); + let response; + let endpoint; + + endpoint = `/zfs/snapshot/id/${encodeURIComponent(snapshotName)}`; + response = await httpClient.get(endpoint); + + if (response.statusCode == 200) { + let res = {}; + for (const property of properties) { + let p; + if (response.body.hasOwnProperty(property)) { + p = response.body[property]; + } else if (response.body.properties.hasOwnProperty(property)) { + p = 
response.body.properties[property]; + } else { + p = { + value: "-", + rawvalue: "-", + source: "-", + }; + } + + if (typeof p === "object" && p !== null) { + // nothing, leave as is + } else { + p = { + value: p, + rawvalue: p, + }; + } + + res[property] = p; + } + + return res; + } + + if (response.statusCode == 404) { + throw new Error("dataset does not exist"); + } + + throw new Error(JSON.stringify(response.body)); + } + + async SnapshotCreate(snapshotName, data = {}) { + const httpClient = await this.getHttpClient(false); + const zb = await this.getZetabyte(); + + let response; + let endpoint; + + const dataset = zb.helpers.extractDatasetName(snapshotName); + const snapshot = zb.helpers.extractSnapshotName(snapshotName); + + data.dataset = dataset; + data.name = snapshot; + + endpoint = "/zfs/snapshot"; + response = await httpClient.post(endpoint, data); + + if (response.statusCode == 200) { + return; + } + + if ( + response.statusCode == 422 && + JSON.stringify(response.body).includes("already exists") + ) { + return; + } + + throw new Error(JSON.stringify(response.body)); + } + + async SnapshotDelete(snapshotName, data = {}) { + const httpClient = await this.getHttpClient(false); + const zb = await this.getZetabyte(); + + let response; + let endpoint; + + endpoint = `/zfs/snapshot/id/${encodeURIComponent(snapshotName)}`; + response = await httpClient.delete(endpoint, data); + + if (response.statusCode == 200) { + return; + } + + if (response.statusCode == 404) { + return; + } + + if ( + response.statusCode == 422 && + JSON.stringify(response.body).includes("not found") + ) { + return; + } + + throw new Error(JSON.stringify(response.body)); + } + + async CloneCreate(snapshotName, datasetName, data = {}) { + const httpClient = await this.getHttpClient(false); + const zb = await this.getZetabyte(); + + let response; + let endpoint; + + data.snapshot = snapshotName; + data.dataset_dst = datasetName; + + endpoint = "/zfs/snapshot/clone"; + response = await 
httpClient.post(endpoint, data); + + if (response.statusCode == 200) { + return; + } + + if ( + response.statusCode == 422 && + JSON.stringify(response.body).includes("already exists") + ) { + return; + } + + throw new Error(JSON.stringify(response.body)); + } + + // get all dataset snapshots + // https://github.com/truenas/middleware/pull/6934 + // then use core.bulk to delete all + + async ReplicationRunOnetime(data) { + const httpClient = await this.getHttpClient(false); + + let response; + let endpoint; + + endpoint = "/replication/run_onetime"; + response = await httpClient.post(endpoint, data); + + // 200 means the 'job' was accepted only + // must continue to check the status of the job to know when it has finished and if it was successful + // /core/get_jobs [["id", "=", jobidhere]] + if (response.statusCode == 200) { + return response.body; + } + + throw new Error(JSON.stringify(response.body)); + } + + async CoreGetJobs(data) { + const httpClient = await this.getHttpClient(false); + + let response; + let endpoint; + + endpoint = "/core/get_jobs"; + response = await httpClient.get(endpoint, data); + + // 200 means the 'job' was accepted only + // must continue to check the status of the job to know when it has finished and if it was successful + // /core/get_jobs [["id", "=", jobidhere]] + if (response.statusCode == 200) { + return response.body; + } + + throw new Error(JSON.stringify(response.body)); + } + + /** + * + * @param {*} data + */ + async FilesystemSetperm(data) { + /* + { + "path": "string", + "mode": "string", + "uid": 0, + "gid": 0, + "options": { + "stripacl": false, + "recursive": false, + "traverse": false + } + } + */ + + const httpClient = await this.getHttpClient(false); + let response; + let endpoint; + + endpoint = `/filesystem/setperm`; + response = await httpClient.post(endpoint, data); + + if (response.statusCode == 200) { + return; + } + + throw new Error(JSON.stringify(response.body)); + } +} + +function IsJsonString(str) { + try 
{ + JSON.parse(str); + } catch (e) { + return false; + } + return true; +} + +module.exports.Api = Api; diff --git a/src/driver/freenas/index.js b/src/driver/freenas/ssh.js similarity index 100% rename from src/driver/freenas/index.js rename to src/driver/freenas/ssh.js From ad40b6a1efb2ab706244981ed6683c4c516f4945 Mon Sep 17 00:00:00 2001 From: Travis Glenn Hansen Date: Tue, 15 Jun 2021 16:50:09 -0600 Subject: [PATCH 04/44] better handling if missing targetPortal config value Signed-off-by: Travis Glenn Hansen --- src/driver/controller-zfs-generic/index.js | 21 +++++++++++++-------- src/driver/freenas/ssh.js | 2 +- 2 files changed, 14 insertions(+), 9 deletions(-) diff --git a/src/driver/controller-zfs-generic/index.js b/src/driver/controller-zfs-generic/index.js index 09d0c7b..de9f68c 100644 --- a/src/driver/controller-zfs-generic/index.js +++ b/src/driver/controller-zfs-generic/index.js @@ -47,8 +47,9 @@ class ControllerZfsGenericDriver extends ControllerZfsSshBaseDriver { ] ) { await zb.zfs.set(datasetName, { - [key]: this.options.nfs.shareStrategySetDatasetProperties - .properties[key], + [key]: + this.options.nfs.shareStrategySetDatasetProperties + .properties[key], }); } } @@ -114,8 +115,10 @@ class ControllerZfsGenericDriver extends ControllerZfsSshBaseDriver { if (this.options.iscsi.shareStrategyTargetCli.tpg.attributes) { for (const attributeName in this.options.iscsi .shareStrategyTargetCli.tpg.attributes) { - const attributeValue = this.options.iscsi - .shareStrategyTargetCli.tpg.attributes[attributeName]; + const attributeValue = + this.options.iscsi.shareStrategyTargetCli.tpg.attributes[ + attributeName + ]; setAttributesText += "\n"; setAttributesText += `set attribute ${attributeName}=${attributeValue}`; } @@ -124,8 +127,10 @@ class ControllerZfsGenericDriver extends ControllerZfsSshBaseDriver { if (this.options.iscsi.shareStrategyTargetCli.tpg.auth) { for (const attributeName in this.options.iscsi .shareStrategyTargetCli.tpg.auth) { - const 
attributeValue = this.options.iscsi - .shareStrategyTargetCli.tpg.auth[attributeName]; + const attributeValue = + this.options.iscsi.shareStrategyTargetCli.tpg.auth[ + attributeName + ]; setAttributesText += "\n"; setAttributesText += `set auth ${attributeName}=${attributeValue}`; } @@ -168,11 +173,11 @@ create /backstores/block/${iscsiName} volume_context = { node_attach_driver: "iscsi", - portal: this.options.iscsi.targetPortal, + portal: this.options.iscsi.targetPortal || "", portals: this.options.iscsi.targetPortals ? this.options.iscsi.targetPortals.join(",") : "", - interface: this.options.iscsi.interface, + interface: this.options.iscsi.interface || "", iqn: iqn, lun: 0, }; diff --git a/src/driver/freenas/ssh.js b/src/driver/freenas/ssh.js index e9987bc..b7b07f4 100644 --- a/src/driver/freenas/ssh.js +++ b/src/driver/freenas/ssh.js @@ -1234,7 +1234,7 @@ class FreeNASDriver extends ControllerZfsSshBaseDriver { volume_context = { node_attach_driver: "iscsi", - portal: this.options.iscsi.targetPortal, + portal: this.options.iscsi.targetPortal || "", portals: this.options.iscsi.targetPortals ? 
this.options.iscsi.targetPortals.join(",") : "", From 9a4d69defe7ba1cae3eb7100333714808f74c3a9 Mon Sep 17 00:00:00 2001 From: Travis Glenn Hansen Date: Tue, 15 Jun 2021 16:50:42 -0600 Subject: [PATCH 05/44] small fix to help with mount detection on cifs volumes Signed-off-by: Travis Glenn Hansen --- src/utils/mount.js | 26 ++++++++++++++++++-------- 1 file changed, 18 insertions(+), 8 deletions(-) diff --git a/src/utils/mount.js b/src/utils/mount.js index 7e50535..9058090 100644 --- a/src/utils/mount.js +++ b/src/utils/mount.js @@ -5,7 +5,8 @@ FINDMNT_COMMON_OPTIONS = [ "--output", "source,target,fstype,label,options,avail,size,used", "-b", - "-J" + "-J", + "--nofsroot", // prevents unwanted behavior with cifs volumes ]; class Mount { @@ -36,7 +37,7 @@ class Mount { if (!options.executor) { options.executor = { - spawn: cp.spawn + spawn: cp.spawn, }; } } @@ -179,8 +180,8 @@ class Mount { /** * very specifically looking for *devices* vs *filesystems/directories* which were bind mounted - * - * @param {*} path + * + * @param {*} path */ async isBindMountedBlockDevice(path) { const filesystem = new Filesystem(); @@ -290,7 +291,16 @@ class Mount { args.unshift(command); command = mount.options.paths.sudo; } - console.log("executing mount command: %s %s", command, args.join(" ")); + // https://regex101.com/r/FHIbcw/3 + // replace password=foo with password=redacted + // (?<=password=)(?:([\"'])(?:\\\1|.)*?\1|[^,\s]+) + const regex = /(?<=password=)(?:([\"'])(?:\\\1|.)*?\1|[^,\s]+)/gi; + const cleansedLog = `${command} ${args.join(" ")}`.replace( + regex, + "redacted" + ); + + console.log("executing mount command: %s", cleansedLog); const child = mount.options.executor.spawn(command, args, options); let didTimeout = false; @@ -302,15 +312,15 @@ class Mount { } return new Promise((resolve, reject) => { - child.stdout.on("data", function(data) { + child.stdout.on("data", function (data) { stdout = stdout + data; }); - child.stderr.on("data", function(data) { + 
child.stderr.on("data", function (data) { stderr = stderr + data; }); - child.on("close", function(code) { + child.on("close", function (code) { const result = { code, stdout, stderr }; if (timeout) { clearTimeout(timeout); From 75abd5e13bde641617f4ae5eea29e5eded4fb086 Mon Sep 17 00:00:00 2001 From: Travis Glenn Hansen Date: Tue, 15 Jun 2021 16:51:18 -0600 Subject: [PATCH 06/44] fix invalid variable reference Signed-off-by: Travis Glenn Hansen --- src/driver/controller-zfs-ssh/index.js | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) diff --git a/src/driver/controller-zfs-ssh/index.js b/src/driver/controller-zfs-ssh/index.js index a61333f..51c5752 100644 --- a/src/driver/controller-zfs-ssh/index.js +++ b/src/driver/controller-zfs-ssh/index.js @@ -1492,7 +1492,8 @@ class ControllerZfsSshBaseDriver extends CsiBaseDriver { let types = []; const volumeParentDatasetName = this.getVolumeParentDatasetName(); - const snapshotParentDatasetName = this.getDetachedSnapshotParentDatasetName(); + const snapshotParentDatasetName = + this.getDetachedSnapshotParentDatasetName(); // get data from cache and return immediately if (starting_token) { @@ -1618,7 +1619,7 @@ class ControllerZfsSshBaseDriver extends CsiBaseDriver { } throw new GrpcError(grpc.status.NOT_FOUND, message); } - throw new GrpcError(grpc.status.FAILED_PRECONDITION, e.toString()); + throw new GrpcError(grpc.status.FAILED_PRECONDITION, err.toString()); } response.indexed.forEach((row) => { @@ -1771,9 +1772,8 @@ class ControllerZfsSshBaseDriver extends CsiBaseDriver { const datasetName = datasetParentName + "/" + source_volume_id; snapshotProperties[SNAPSHOT_CSI_NAME_PROPERTY_NAME] = name; - snapshotProperties[ - SNAPSHOT_CSI_SOURCE_VOLUME_ID_PROPERTY_NAME - ] = source_volume_id; + snapshotProperties[SNAPSHOT_CSI_SOURCE_VOLUME_ID_PROPERTY_NAME] = + source_volume_id; snapshotProperties[MANAGED_PROPERTY_NAME] = "true"; driver.ctx.logger.verbose("requested snapshot name: %s", name); @@ -1995,9 
+1995,8 @@ class ControllerZfsSshBaseDriver extends CsiBaseDriver { // cleanup parent dataset if possible if (detachedSnapshot) { - let containerDataset = zb.helpers.extractParentDatasetName( - fullSnapshotName - ); + let containerDataset = + zb.helpers.extractParentDatasetName(fullSnapshotName); try { await this.removeSnapshotsFromDatatset(containerDataset); await zb.zfs.destroy(containerDataset); From ff659d80047ab7132481bbc08d2385334f6b16b7 Mon Sep 17 00:00:00 2001 From: Travis Glenn Hansen Date: Tue, 15 Jun 2021 16:51:46 -0600 Subject: [PATCH 07/44] implement remaining bits of truenas pure api-based driver Signed-off-by: Travis Glenn Hansen --- src/driver/freenas/api.js | 1086 ++++++++++++++++++++++++++++++-- src/driver/freenas/http/api.js | 128 ++-- 2 files changed, 1109 insertions(+), 105 deletions(-) diff --git a/src/driver/freenas/api.js b/src/driver/freenas/api.js index 637b1d2..4e15a25 100644 --- a/src/driver/freenas/api.js +++ b/src/driver/freenas/api.js @@ -3,6 +3,7 @@ const { CsiBaseDriver } = require("../index"); const HttpClient = require("./http").Client; const TrueNASApiClient = require("./http/api").Api; const { Zetabyte } = require("../../utils/zfs"); +const sleep = require("../../utils/general").sleep; const Handlebars = require("handlebars"); const uuidv4 = require("uuid").v4; @@ -41,14 +42,6 @@ const VOLUME_CONTEXT_PROVISIONER_DRIVER_PROPERTY_NAME = const VOLUME_CONTEXT_PROVISIONER_INSTANCE_ID_PROPERTY_NAME = "democratic-csi:volume_context_provisioner_instance_id"; -function isPropertyValueSet(value) { - if (value === undefined || value === null || value == "" || value == "-") { - return false; - } - - return true; -} - class FreeNASApiDriver extends CsiBaseDriver { constructor(ctx, options) { super(...arguments); @@ -1221,7 +1214,7 @@ class FreeNASApiDriver extends CsiBaseDriver { volume_context = { node_attach_driver: "iscsi", - portal: this.options.iscsi.targetPortal, + portal: this.options.iscsi.targetPortal || "", portals: 
this.options.iscsi.targetPortals ? this.options.iscsi.targetPortals.join(",") : "", @@ -1587,10 +1580,32 @@ class FreeNASApiDriver extends CsiBaseDriver { } } - async expandVolume(call, datasetName) { - const driverShareType = this.getDriverShareType(); + async removeSnapshotsFromDatatset(datasetName, options = {}) { + const httpClient = await this.getHttpClient(); + const httpApiClient = await this.getTrueNASHttpApiClient(); + let response; + let endpoint = `/pool/dataset/id/${encodeURIComponent(datasetName)}`; + response = await httpClient.get(endpoint, { "extra.snapshots": 1 }); + + //console.log(response); + + if (response.statusCode == 404) { + return; + } + if (response.statusCode == 200) { + for (let snapshot of response.body.snapshots) { + await httpApiClient.SnapshotDelete(snapshot.name); + } + return; + } + + throw new Error("unhandled statusCode: " + response.statusCode); + } + + async expandVolume(call, datasetName) { return; + const driverShareType = this.getDriverShareType(); const sshClient = this.getSshClient(); switch (driverShareType) { @@ -1628,6 +1643,108 @@ class FreeNASApiDriver extends CsiBaseDriver { } } + async getVolumeStatus(volume_id) { + const driver = this; + + if (!!!semver.satisfies(driver.ctx.csiVersion, ">=1.2.0")) { + return; + } + + let abnormal = false; + let message = "OK"; + let volume_status = {}; + + //LIST_VOLUMES_PUBLISHED_NODES + if ( + semver.satisfies(driver.ctx.csiVersion, ">=1.2.0") && + driver.options.service.controller.capabilities.rpc.includes( + "LIST_VOLUMES_PUBLISHED_NODES" + ) + ) { + // TODO: let drivers fill this in + volume_status.published_node_ids = []; + } + + //VOLUME_CONDITION + if ( + semver.satisfies(driver.ctx.csiVersion, ">=1.3.0") && + driver.options.service.controller.capabilities.rpc.includes( + "VOLUME_CONDITION" + ) + ) { + // TODO: let drivers fill ths in + volume_condition = { abnormal, message }; + volume_status.volume_condition = volume_condition; + } + + return volume_status; + } + + async 
populateCsiVolumeFromData(row) { + const driver = this; + const zb = await this.getZetabyte(); + const driverZfsResourceType = this.getDriverZfsResourceType(); + let datasetName = this.getVolumeParentDatasetName(); + + // ignore rows were csi_name is empty + if (row[MANAGED_PROPERTY_NAME] != "true") { + return; + } + + let volume_content_source; + let volume_context = JSON.parse(row[SHARE_VOLUME_CONTEXT_PROPERTY_NAME]); + if ( + zb.helpers.isPropertyValueSet( + row[VOLUME_CONTEXT_PROVISIONER_DRIVER_PROPERTY_NAME] + ) + ) { + volume_context["provisioner_driver"] = + row[VOLUME_CONTEXT_PROVISIONER_DRIVER_PROPERTY_NAME]; + } + + if ( + zb.helpers.isPropertyValueSet( + row[VOLUME_CONTEXT_PROVISIONER_INSTANCE_ID_PROPERTY_NAME] + ) + ) { + volume_context["provisioner_driver_instance_id"] = + row[VOLUME_CONTEXT_PROVISIONER_INSTANCE_ID_PROPERTY_NAME]; + } + + if ( + zb.helpers.isPropertyValueSet( + row[VOLUME_CONTENT_SOURCE_TYPE_PROPERTY_NAME] + ) + ) { + volume_content_source = {}; + switch (row[VOLUME_CONTENT_SOURCE_TYPE_PROPERTY_NAME]) { + case "snapshot": + volume_content_source.snapshot = {}; + volume_content_source.snapshot.snapshot_id = + row[VOLUME_CONTENT_SOURCE_ID_PROPERTY_NAME]; + break; + case "volume": + volume_content_source.volume = {}; + volume_content_source.volume.volume_id = + row[VOLUME_CONTENT_SOURCE_ID_PROPERTY_NAME]; + break; + } + } + + let volume = { + // remove parent dataset info + volume_id: row["name"].replace(new RegExp("^" + datasetName + "/"), ""), + capacity_bytes: + driverZfsResourceType == "filesystem" + ? 
row["refquota"] + : row["volsize"], + content_source: volume_content_source, + volume_context, + }; + + return volume; + } + /** * cannot make this a storage class parameter as storage class/etc context is *not* sent * into various calls such as GetControllerCapabilities etc @@ -2008,15 +2125,50 @@ class FreeNASApiDriver extends CsiBaseDriver { if (detachedClone) { try { - // TODO: fix this - response = await zb.zfs.send_receive( - fullSnapshotName, - [], - datasetName, - [] - ); + response = await httpApiClient.ReplicationRunOnetime({ + direction: "PUSH", + transport: "LOCAL", + source_datasets: [ + zb.helpers.extractDatasetName(fullSnapshotName), + ], + target_dataset: datasetName, + name_regex: zb.helpers.extractSnapshotName(fullSnapshotName), + recursive: false, + retention_policy: "NONE", + readonly: "IGNORE", + properties: false, + }); - response = await httpApiClient.DatasetSet(datasetName, volumeProperties); + let job_id = response; + let job; + + // wait for job to finish + while ( + !job || + !["SUCCESS", "ABORTED", "FAILED"].includes(job.state) + ) { + job = await httpApiClient.CoreGetJobs({ id: job_id }); + job = job[0]; + await sleep(3000); + } + + switch (job.state) { + case "SUCCESS": + break; + case "FAILED": + // TODO: handle scenarios where the dataset + break; + case "ABORTED": + // TODO: handle this + break; + default: + break; + } + + response = await httpApiClient.DatasetSet( + datasetName, + volumeProperties + ); } catch (err) { if ( err.toString().includes("destination") && @@ -2118,13 +2270,45 @@ class FreeNASApiDriver extends CsiBaseDriver { if (detachedClone) { try { - // TODO: fix this - response = await zb.zfs.send_receive( - fullSnapshotName, - [], - datasetName, - [] - ); + response = await httpApiClient.ReplicationRunOnetime({ + direction: "PUSH", + transport: "LOCAL", + source_datasets: [ + zb.helpers.extractDatasetName(fullSnapshotName), + ], + target_dataset: datasetName, + name_regex: 
zb.helpers.extractSnapshotName(fullSnapshotName), + recursive: false, + retention_policy: "NONE", + readonly: "IGNORE", + properties: false, + }); + + let job_id = response; + let job; + + // wait for job to finish + while ( + !job || + !["SUCCESS", "ABORTED", "FAILED"].includes(job.state) + ) { + job = await httpApiClient.CoreGetJobs({ id: job_id }); + job = job[0]; + await sleep(3000); + } + + switch (job.state) { + case "SUCCESS": + break; + case "FAILED": + // TODO: handle scenarios where the dataset + break; + case "ABORTED": + // TODO: handle this + break; + default: + break; + } } catch (err) { if ( err.toString().includes("destination") && @@ -2136,7 +2320,10 @@ class FreeNASApiDriver extends CsiBaseDriver { } } - response = await httpApiClient.DatasetSet(datasetName, volumeProperties); + response = await httpApiClient.DatasetSet( + datasetName, + volumeProperties + ); // remove snapshots from target await this.removeSnapshotsFromDatatset(datasetName, { @@ -2151,9 +2338,13 @@ class FreeNASApiDriver extends CsiBaseDriver { // create clone // zfs origin property contains parent info, ie: pool0/k8s/test/PVC-111@clone-test try { - response = await httpApiClient.CloneCreate(fullSnapshotName, datasetName, { - dataset_properties: volumeProperties, - }); + response = await httpApiClient.CloneCreate( + fullSnapshotName, + datasetName, + { + dataset_properties: volumeProperties, + } + ); } catch (err) { if (err.toString().includes("dataset does not exist")) { throw new GrpcError( @@ -2221,10 +2412,6 @@ class FreeNASApiDriver extends CsiBaseDriver { await httpApiClient.DatasetSet(datasetName, properties); } - //datasetPermissionsMode: 0777, - //datasetPermissionsUser: "root", - //datasetPermissionsGroup: "wheel", - // get properties needed for remaining calls properties = await httpApiClient.DatasetGet(datasetName, [ "mountpoint", @@ -2402,9 +2589,7 @@ class FreeNASApiDriver extends CsiBaseDriver { ); try { - await zb.zfs.destroy(properties.origin.value, { - recurse: 
true, - force: true, + await httpApiClient.SnapshotDelete(properties.origin.value, { defer: true, }); } catch (err) { @@ -2440,6 +2625,773 @@ class FreeNASApiDriver extends CsiBaseDriver { return {}; } + /** + * + * @param {*} call + */ + async ControllerExpandVolume(call) { + const driver = this; + const driverZfsResourceType = this.getDriverZfsResourceType(); + const httpApiClient = await this.getTrueNASHttpApiClient(); + const zb = await this.getZetabyte(); + + let datasetParentName = this.getVolumeParentDatasetName(); + let name = call.request.volume_id; + + if (!datasetParentName) { + throw new GrpcError( + grpc.status.FAILED_PRECONDITION, + `invalid configuration: missing datasetParentName` + ); + } + + if (!name) { + throw new GrpcError( + grpc.status.INVALID_ARGUMENT, + `volume_id is required` + ); + } + + const datasetName = datasetParentName + "/" + name; + + let capacity_bytes = + call.request.capacity_range.required_bytes || + call.request.capacity_range.limit_bytes; + if (!capacity_bytes) { + //should never happen, value must be set + throw new GrpcError( + grpc.status.INVALID_ARGUMENT, + `volume capacity is required (either required_bytes or limit_bytes)` + ); + } + + if (capacity_bytes && driverZfsResourceType == "volume") { + //make sure to align capacity_bytes with zvol blocksize + //volume size must be a multiple of volume block size + let properties = await httpApiClient.DatasetGet(datasetName, [ + "volblocksize", + ]); + capacity_bytes = zb.helpers.generateZvolSize( + capacity_bytes, + properties.volblocksize.rawvalue + ); + } + + if ( + call.request.capacity_range.required_bytes > 0 && + call.request.capacity_range.limit_bytes > 0 && + call.request.capacity_range.required_bytes > + call.request.capacity_range.limit_bytes + ) { + throw new GrpcError( + grpc.status.INVALID_ARGUMENT, + `required_bytes is greather than limit_bytes` + ); + } + + // ensure *actual* capacity is not greater than limit + if ( + call.request.capacity_range.limit_bytes && 
+ call.request.capacity_range.limit_bytes > 0 && + capacity_bytes > call.request.capacity_range.limit_bytes + ) { + throw new GrpcError( + grpc.status.OUT_OF_RANGE, + `required volume capacity is greater than limit` + ); + } + + let setProps = false; + let properties = {}; + + switch (driverZfsResourceType) { + case "filesystem": + // set quota + if (this.options.zfs.datasetEnableQuotas) { + setProps = true; + properties.refquota = capacity_bytes; + } + + // set reserve + if (this.options.zfs.datasetEnableReservation) { + setProps = true; + properties.refreservation = capacity_bytes; + } + break; + case "volume": + properties.volsize = capacity_bytes; + setProps = true; + + if (this.options.zfs.zvolEnableReservation) { + properties.refreservation = capacity_bytes; + } + break; + } + + if (setProps) { + await httpApiClient.DatasetSet(datasetName, properties); + } + + await this.expandVolume(call, datasetName); + + return { + capacity_bytes: + this.options.zfs.datasetEnableQuotas || + driverZfsResourceType == "volume" + ? capacity_bytes + : 0, + node_expansion_required: driverZfsResourceType == "volume" ? true : false, + }; + } + + /** + * TODO: consider volume_capabilities? 
+ * + * @param {*} call + */ + async GetCapacity(call) { + const driver = this; + const httpApiClient = await this.getTrueNASHttpApiClient(); + const zb = await this.getZetabyte(); + + let datasetParentName = this.getVolumeParentDatasetName(); + + if (!datasetParentName) { + throw new GrpcError( + grpc.status.FAILED_PRECONDITION, + `invalid configuration: missing datasetParentName` + ); + } + + if (call.request.volume_capabilities) { + const result = this.assertCapabilities(call.request.volume_capabilities); + + if (result.valid !== true) { + return { available_capacity: 0 }; + } + } + + const datasetName = datasetParentName; + + let properties; + properties = await httpApiClient.DatasetGet(datasetName, ["available"]); + + return { available_capacity: Number(properties.available.rawvalue) }; + } + + /** + * Get a single volume + * + * @param {*} call + */ + async ControllerGetVolume(call) { + const driver = this; + const driverZfsResourceType = this.getDriverZfsResourceType(); + const httpApiClient = await this.getTrueNASHttpApiClient(); + const zb = await this.getZetabyte(); + + let datasetParentName = this.getVolumeParentDatasetName(); + let response; + let name = call.request.volume_id; + + if (!datasetParentName) { + throw new GrpcError( + grpc.status.FAILED_PRECONDITION, + `invalid configuration: missing datasetParentName` + ); + } + + if (!name) { + throw new GrpcError( + grpc.status.INVALID_ARGUMENT, + `volume_id is required` + ); + } + + const datasetName = datasetParentName + "/" + name; + + try { + response = await httpApiClient.DatasetGet(datasetName, [ + "name", + "mountpoint", + "refquota", + "available", + "used", + VOLUME_CSI_NAME_PROPERTY_NAME, + VOLUME_CONTENT_SOURCE_TYPE_PROPERTY_NAME, + VOLUME_CONTENT_SOURCE_ID_PROPERTY_NAME, + "volsize", + MANAGED_PROPERTY_NAME, + SHARE_VOLUME_CONTEXT_PROPERTY_NAME, + SUCCESS_PROPERTY_NAME, + VOLUME_CONTEXT_PROVISIONER_INSTANCE_ID_PROPERTY_NAME, + VOLUME_CONTEXT_PROVISIONER_DRIVER_PROPERTY_NAME, + ]); + } catch 
(err) { + if (err.toString().includes("dataset does not exist")) { + throw new GrpcError(grpc.status.NOT_FOUND, `volume_id is missing`); + } + + throw err; + } + + let row = {}; + for (let p in response) { + row[p] = response[p].rawvalue; + } + + driver.ctx.logger.debug("list volumes result: %j", row); + let volume = await driver.populateCsiVolumeFromData(row); + let status = await driver.getVolumeStatus(datasetName); + + let res = { volume }; + if (status) { + res.status = status; + } + + return res; + } + + /** + * + * TODO: check capability to ensure not asking about block volumes + * + * @param {*} call + */ + async ListVolumes(call) { + const driver = this; + const driverZfsResourceType = this.getDriverZfsResourceType(); + const httpClient = await this.getHttpClient(); + const httpApiClient = await this.getTrueNASHttpApiClient(); + const zb = await this.getZetabyte(); + + let datasetParentName = this.getVolumeParentDatasetName(); + let entries = []; + let entries_length = 0; + let next_token; + let uuid, page, next_page; + let response; + let endpoint; + + const max_entries = call.request.max_entries; + const starting_token = call.request.starting_token; + + // get data from cache and return immediately + if (starting_token) { + let parts = starting_token.split(":"); + uuid = parts[0]; + page = parseInt(parts[1]); + entries = this.ctx.cache.get(`ListVolumes:result:${uuid}`); + if (entries) { + entries = JSON.parse(JSON.stringify(entries)); + entries_length = entries.length; + entries = entries.splice((page - 1) * max_entries, max_entries); + if (page * max_entries < entries_length) { + next_page = page + 1; + next_token = `${uuid}:${next_page}`; + } else { + next_token = null; + } + const data = { + entries: entries, + next_token: next_token, + }; + + return data; + } else { + // TODO: throw error / cache expired + } + } + + if (!datasetParentName) { + throw new GrpcError( + grpc.status.FAILED_PRECONDITION, + `invalid configuration: missing datasetParentName` 
+ ); + } + + const datasetName = datasetParentName; + const rows = []; + + endpoint = `/pool/dataset/id/${encodeURIComponent(datasetName)}`; + response = await httpClient.get(endpoint); + + //console.log(response); + + if (response.statusCode == 404) { + return { + entries: [], + next_token: null, + }; + } + if (response.statusCode == 200) { + for (let child of response.body.children) { + let child_properties = httpApiClient.normalizeProperties(child, [ + "name", + "mountpoint", + "refquota", + "available", + "used", + VOLUME_CSI_NAME_PROPERTY_NAME, + VOLUME_CONTENT_SOURCE_TYPE_PROPERTY_NAME, + VOLUME_CONTENT_SOURCE_ID_PROPERTY_NAME, + "volsize", + MANAGED_PROPERTY_NAME, + SHARE_VOLUME_CONTEXT_PROPERTY_NAME, + SUCCESS_PROPERTY_NAME, + VOLUME_CONTEXT_PROVISIONER_INSTANCE_ID_PROPERTY_NAME, + VOLUME_CONTEXT_PROVISIONER_DRIVER_PROPERTY_NAME, + ]); + + let row = {}; + for (let p in child_properties) { + row[p] = child_properties[p].rawvalue; + } + + rows.push(row); + } + } + + driver.ctx.logger.debug("list volumes result: %j", rows); + + entries = []; + for (let row of rows) { + // ignore rows were csi_name is empty + if (row[MANAGED_PROPERTY_NAME] != "true") { + return; + } + + let volume_id = row["name"].replace( + new RegExp("^" + datasetName + "/"), + "" + ); + + let volume = await driver.populateCsiVolumeFromData(row); + let status = await driver.getVolumeStatus(volume_id); + + entries.push({ + volume, + status, + }); + } + + if (max_entries && entries.length > max_entries) { + uuid = uuidv4(); + this.ctx.cache.set( + `ListVolumes:result:${uuid}`, + JSON.parse(JSON.stringify(entries)) + ); + next_token = `${uuid}:2`; + entries = entries.splice(0, max_entries); + } + + const data = { + entries: entries, + next_token: next_token, + }; + + return data; + } + + /** + * + * @param {*} call + */ + async ListSnapshots(call) { + const driver = this; + const driverZfsResourceType = this.getDriverZfsResourceType(); + const httpClient = await this.getHttpClient(); + const 
httpApiClient = await this.getTrueNASHttpApiClient(); + const zb = await this.getZetabyte(); + + let entries = []; + let entries_length = 0; + let next_token; + let uuid, page, next_page; + + const max_entries = call.request.max_entries; + const starting_token = call.request.starting_token; + + let types = []; + + const volumeParentDatasetName = this.getVolumeParentDatasetName(); + const snapshotParentDatasetName = + this.getDetachedSnapshotParentDatasetName(); + + // get data from cache and return immediately + if (starting_token) { + let parts = starting_token.split(":"); + uuid = parts[0]; + page = parseInt(parts[1]); + entries = this.ctx.cache.get(`ListSnapshots:result:${uuid}`); + if (entries) { + entries = JSON.parse(JSON.stringify(entries)); + entries_length = entries.length; + entries = entries.splice((page - 1) * max_entries, max_entries); + if (page * max_entries < entries_length) { + next_page = page + 1; + next_token = `${uuid}:${next_page}`; + } else { + next_token = null; + } + const data = { + entries: entries, + next_token: next_token, + }; + + return data; + } else { + // TODO: throw error / cache expired + } + } + + if (!volumeParentDatasetName) { + // throw error + throw new GrpcError( + grpc.status.FAILED_PRECONDITION, + `invalid configuration: missing datasetParentName` + ); + } + + let snapshot_id = call.request.snapshot_id; + let source_volume_id = call.request.source_volume_id; + + entries = []; + for (let loopType of ["snapshot", "filesystem"]) { + let endpoint, response, operativeFilesystem, operativeFilesystemType; + let datasetParentName; + switch (loopType) { + case "snapshot": + datasetParentName = volumeParentDatasetName; + types = ["snapshot"]; + // should only send 1 of snapshot_id or source_volume_id, preferring the former if sent + if (snapshot_id) { + if (!zb.helpers.isZfsSnapshot(snapshot_id)) { + continue; + } + operativeFilesystem = volumeParentDatasetName + "/" + snapshot_id; + operativeFilesystemType = 3; + } else if 
(source_volume_id) { + operativeFilesystem = + volumeParentDatasetName + "/" + source_volume_id; + operativeFilesystemType = 2; + } else { + operativeFilesystem = volumeParentDatasetName; + operativeFilesystemType = 1; + } + break; + case "filesystem": + datasetParentName = snapshotParentDatasetName; + if (!datasetParentName) { + continue; + } + if (driverZfsResourceType == "filesystem") { + types = ["filesystem"]; + } else { + types = ["volume"]; + } + + // should only send 1 of snapshot_id or source_volume_id, preferring the former if sent + if (snapshot_id) { + if (zb.helpers.isZfsSnapshot(snapshot_id)) { + continue; + } + operativeFilesystem = snapshotParentDatasetName + "/" + snapshot_id; + operativeFilesystemType = 3; + } else if (source_volume_id) { + operativeFilesystem = + snapshotParentDatasetName + "/" + source_volume_id; + operativeFilesystemType = 2; + } else { + operativeFilesystem = snapshotParentDatasetName; + operativeFilesystemType = 1; + } + break; + } + + let rows = []; + + try { + let zfsProperties = [ + "name", + "creation", + "mountpoint", + "refquota", + "available", + "used", + VOLUME_CSI_NAME_PROPERTY_NAME, + SNAPSHOT_CSI_NAME_PROPERTY_NAME, + MANAGED_PROPERTY_NAME, + ]; + /* + response = await zb.zfs.list( + operativeFilesystem, + , + { types, recurse: true } + ); + */ + + //console.log(types, operativeFilesystem, operativeFilesystemType); + + if (types.includes("snapshot")) { + switch (operativeFilesystemType) { + case 3: + // get explicit snapshot + response = await httpApiClient.SnapshotGet( + operativeFilesystem, + zfsProperties + ); + + let row = {}; + for (let p in response) { + row[p] = response[p].rawvalue; + } + rows.push(row); + break; + case 2: + // get snapshots connected to the to source_volume_id + endpoint = `/pool/dataset/id/${encodeURIComponent( + operativeFilesystem + )}`; + response = await httpClient.get(endpoint, { + "extra.snapshots": 1, + }); + if (response.statusCode == 404) { + throw new Error("dataset does not 
exist"); + } else if (response.statusCode == 200) { + for (let snapshot of response.body.snapshots) { + let i_response = await httpApiClient.SnapshotGet( + snapshot.name, + zfsProperties + ); + let row = {}; + for (let p in i_response) { + row[p] = i_response[p].rawvalue; + } + rows.push(row); + } + } else { + throw new Error(`unhandled statusCode: ${response.statusCode}`); + } + break; + case 1: + // get all snapshot recursively from the parent dataset + endpoint = `/pool/dataset/id/${encodeURIComponent( + operativeFilesystem + )}`; + response = await httpClient.get(endpoint, { + "extra.snapshots": 1, + }); + if (response.statusCode == 404) { + throw new Error("dataset does not exist"); + } else if (response.statusCode == 200) { + for (let child of response.body.children) { + for (let snapshot of child.snapshots) { + let i_response = await httpApiClient.SnapshotGet( + snapshot.name, + zfsProperties + ); + let row = {}; + for (let p in i_response) { + row[p] = i_response[p].rawvalue; + } + rows.push(row); + } + } + } else { + throw new Error(`unhandled statusCode: ${response.statusCode}`); + } + break; + default: + throw new GrpcError( + grpc.status.FAILED_PRECONDITION, + `invalid operativeFilesystemType [${operativeFilesystemType}]` + ); + break; + } + } else if (types.includes("filesystem") || types.includes("volume")) { + switch (operativeFilesystemType) { + case 3: + // get explicit snapshot + response = await httpApiClient.DatasetGet( + operativeFilesystem, + zfsProperties + ); + + let row = {}; + for (let p in response) { + row[p] = response[p].rawvalue; + } + rows.push(row); + break; + case 2: + // get snapshots connected to the to source_volume_id + endpoint = `/pool/dataset/id/${encodeURIComponent( + operativeFilesystem + )}`; + response = await httpClient.get(endpoint); + if (response.statusCode == 404) { + throw new Error("dataset does not exist"); + } else if (response.statusCode == 200) { + for (let child of response.body.children) { + let i_response = 
httpApiClient.normalizeProperties( + child, + zfsProperties + ); + let row = {}; + for (let p in i_response) { + row[p] = i_response[p].rawvalue; + } + rows.push(row); + } + } else { + throw new Error(`unhandled statusCode: ${response.statusCode}`); + } + break; + case 1: + // get all snapshot recursively from the parent dataset + endpoint = `/pool/dataset/id/${encodeURIComponent( + operativeFilesystem + )}`; + response = await httpClient.get(endpoint); + if (response.statusCode == 404) { + throw new Error("dataset does not exist"); + } else if (response.statusCode == 200) { + for (let child of response.body.children) { + for (let grandchild of child.children) { + let i_response = httpApiClient.normalizeProperties( + grandchild, + zfsProperties + ); + let row = {}; + for (let p in i_response) { + row[p] = i_response[p].rawvalue; + } + rows.push(row); + } + } + } else { + throw new Error(`unhandled statusCode: ${response.statusCode}`); + } + break; + default: + throw new GrpcError( + grpc.status.FAILED_PRECONDITION, + `invalid operativeFilesystemType [${operativeFilesystemType}]` + ); + break; + } + } else { + throw new GrpcError( + grpc.status.FAILED_PRECONDITION, + `invalid zfs types [${types.join(",")}]` + ); + } + } catch (err) { + let message; + if (err.toString().includes("dataset does not exist")) { + switch (operativeFilesystemType) { + case 1: + //message = `invalid configuration: datasetParentName ${datasetParentName} does not exist`; + continue; + break; + case 2: + message = `source_volume_id ${source_volume_id} does not exist`; + break; + case 3: + message = `snapshot_id ${snapshot_id} does not exist`; + break; + } + throw new GrpcError(grpc.status.NOT_FOUND, message); + } + throw new GrpcError(grpc.status.FAILED_PRECONDITION, err.toString()); + } + + rows.forEach((row) => { + // skip any snapshots not explicitly created by CO + if (row[MANAGED_PROPERTY_NAME] != "true") { + return; + } + + // ignore snapshots that are not explicit CO snapshots + if ( + 
!zb.helpers.isPropertyValueSet(row[SNAPSHOT_CSI_NAME_PROPERTY_NAME]) + ) { + return; + } + + // strip parent dataset + let source_volume_id = row["name"].replace( + new RegExp("^" + datasetParentName + "/"), + "" + ); + + // strip snapshot details (@snapshot-name) + if (source_volume_id.includes("@")) { + source_volume_id = source_volume_id.substring( + 0, + source_volume_id.indexOf("@") + ); + } else { + source_volume_id = source_volume_id.replace( + new RegExp("/" + row[SNAPSHOT_CSI_NAME_PROPERTY_NAME] + "$"), + "" + ); + } + + if (source_volume_id == datasetParentName) { + return; + } + + if (source_volume_id) + entries.push({ + snapshot: { + /** + * The purpose of this field is to give CO guidance on how much space + * is needed to create a volume from this snapshot. + * + * In that vein, I think it's best to return 0 here given the + * unknowns of 'cow' implications. + */ + size_bytes: 0, + + // remove parent dataset details + snapshot_id: row["name"].replace( + new RegExp("^" + datasetParentName + "/"), + "" + ), + source_volume_id: source_volume_id, + //https://github.com/protocolbuffers/protobuf/blob/master/src/google/protobuf/timestamp.proto + creation_time: { + seconds: zb.helpers.isPropertyValueSet(row["creation"]) + ? 
row["creation"] + : 0, + nanos: 0, + }, + ready_to_use: true, + }, + }); + }); + } + + if (max_entries && entries.length > max_entries) { + uuid = uuidv4(); + this.ctx.cache.set( + `ListSnapshots:result:${uuid}`, + JSON.parse(JSON.stringify(entries)) + ); + next_token = `${uuid}:2`; + entries = entries.splice(0, max_entries); + } + + const data = { + entries: entries, + next_token: next_token, + }; + + return data; + } + /** * * @param {*} call @@ -2552,10 +3504,12 @@ class FreeNASApiDriver extends CsiBaseDriver { name; snapshotDatasetName = datasetName + "/" + name; + // create target dataset await httpApiClient.DatasetCreate(datasetName, { create_ancestors: true, }); + // create snapshot on source try { await httpApiClient.SnapshotCreate(tmpSnapshotName); } catch (err) { @@ -2570,16 +3524,45 @@ class FreeNASApiDriver extends CsiBaseDriver { } try { - //TODO: get the value from the response and wait for the job to finish + // copy data from source snapshot to target dataset response = await httpApiClient.ReplicationRunOnetime({ direction: "PUSH", transport: "LOCAL", - source_datasets: [tmpSnapshotName], + source_datasets: [zb.helpers.extractDatasetName(tmpSnapshotName)], target_dataset: snapshotDatasetName, + name_regex: zb.helpers.extractSnapshotName(tmpSnapshotName), recursive: false, - retention_policy: null, + retention_policy: "NONE", + readonly: "IGNORE", + properties: false, }); + let job_id = response; + let job; + + // wait for job to finish + while (!job || !["SUCCESS", "ABORTED", "FAILED"].includes(job.state)) { + job = await httpApiClient.CoreGetJobs({ id: job_id }); + job = job[0]; + await sleep(3000); + } + + switch (job.state) { + case "SUCCESS": + break; + case "FAILED": + // TODO: handle scenarios where the dataset + break; + case "ABORTED": + // TODO: handle this + break; + default: + break; + } + + //throw new Error("foobar"); + + // set properties on target dataset response = await httpApiClient.DatasetSet( snapshotDatasetName, 
snapshotProperties @@ -2601,16 +3584,12 @@ class FreeNASApiDriver extends CsiBaseDriver { "@" + zb.helpers.extractSnapshotName(tmpSnapshotName), { - //recurse: true, - //force: true, defer: true, } ); // remove snapshot from source await httpApiClient.SnapshotDelete(tmpSnapshotName, { - //recurse: true, - //force: true, defer: true, }); } else { @@ -2636,7 +3615,7 @@ class FreeNASApiDriver extends CsiBaseDriver { "creation", "mountpoint", "refquota", - "avail", + "available", "used", VOLUME_CSI_NAME_PROPERTY_NAME, SNAPSHOT_CSI_NAME_PROPERTY_NAME, @@ -2661,6 +3640,15 @@ class FreeNASApiDriver extends CsiBaseDriver { // set this just before sending out response so we know if volume completed // this should give us a relatively sane way to clean up artifacts over time //await zb.zfs.set(fullSnapshotName, { [SUCCESS_PROPERTY_NAME]: "true" }); + if (detachedSnapshot) { + await httpApiClient.DatasetSet(fullSnapshotName, { + [SUCCESS_PROPERTY_NAME]: "true", + }); + } else { + await httpApiClient.SnapshotSet(fullSnapshotName, { + [SUCCESS_PROPERTY_NAME]: "true", + }); + } return { snapshot: { @@ -2681,7 +3669,9 @@ class FreeNASApiDriver extends CsiBaseDriver { source_volume_id: source_volume_id, //https://github.com/protocolbuffers/protobuf/blob/master/src/google/protobuf/timestamp.proto creation_time: { - seconds: properties.creation.rawvalue, + seconds: zb.helpers.isPropertyValueSet(properties.creation.rawvalue) + ? 
properties.creation.rawvalue + : 0, nanos: 0, }, ready_to_use: true, @@ -2741,8 +3731,6 @@ class FreeNASApiDriver extends CsiBaseDriver { } else { try { await httpApiClient.SnapshotDelete(fullSnapshotName, { - //recurse: true, - //force: true, defer: true, }); } catch (err) { @@ -2761,7 +3749,7 @@ class FreeNASApiDriver extends CsiBaseDriver { let containerDataset = zb.helpers.extractParentDatasetName(fullSnapshotName); try { - //await this.removeSnapshotsFromDatatset(containerDataset); + await this.removeSnapshotsFromDatatset(containerDataset); await httpApiClient.DatasetDelete(containerDataset); } catch (err) { if (!err.toString().includes("filesystem has children")) { diff --git a/src/driver/freenas/http/api.js b/src/driver/freenas/http/api.js index d9a4881..8607697 100644 --- a/src/driver/freenas/http/api.js +++ b/src/driver/freenas/http/api.js @@ -325,6 +325,46 @@ class Api { return arr; } + normalizeProperties(dataset, properties) { + let res = {}; + for (const property of properties) { + let p; + if (dataset.hasOwnProperty(property)) { + p = dataset[property]; + } else if ( + dataset.properties && + dataset.properties.hasOwnProperty(property) + ) { + p = dataset.properties[property]; + } else if ( + dataset.user_properties && + dataset.user_properties.hasOwnProperty(property) + ) { + p = dataset.user_properties[property]; + } else { + p = { + value: "-", + rawvalue: "-", + source: "-", + }; + } + + if (typeof p === "object" && p !== null) { + // nothing, leave as is + } else { + p = { + value: p, + rawvalue: p, + source: "-", + }; + } + + res[property] = p; + } + + return res; + } + async DatasetCreate(datasetName, data) { const httpClient = await this.getHttpClient(false); let response; @@ -441,34 +481,7 @@ class Api { response = await httpClient.get(endpoint); if (response.statusCode == 200) { - let res = {}; - for (const property of properties) { - let p; - if (response.body.hasOwnProperty(property)) { - p = response.body[property]; - } else if 
(response.body.user_properties.hasOwnProperty(property)) { - p = response.body.user_properties[property]; - } else { - p = { - value: "-", - rawvalue: "-", - source: "-", - }; - } - - if (typeof p === "object" && p !== null) { - // nothing, leave as is - } else { - p = { - value: p, - rawvalue: p, - }; - } - - res[property] = p; - } - - return res; + return this.normalizeProperties(response.body, properties); } if (response.statusCode == 404) { @@ -478,6 +491,26 @@ class Api { throw new Error(JSON.stringify(response.body)); } + async SnapshotSet(snapshotName, properties) { + const httpClient = await this.getHttpClient(false); + let response; + let endpoint; + + endpoint = `/zfs/snapshot/id/${encodeURIComponent(snapshotName)}`; + response = await httpClient.put(endpoint, { + //...this.getSystemProperties(properties), + user_properties_update: this.getPropertiesKeyValueArray( + this.getUserProperties(properties) + ), + }); + + if (response.statusCode == 200) { + return; + } + + throw new Error(JSON.stringify(response.body)); + } + /** * * zfs get -Hp all tank/k8s/test/PVC-111 @@ -495,34 +528,7 @@ class Api { response = await httpClient.get(endpoint); if (response.statusCode == 200) { - let res = {}; - for (const property of properties) { - let p; - if (response.body.hasOwnProperty(property)) { - p = response.body[property]; - } else if (response.body.properties.hasOwnProperty(property)) { - p = response.body.properties[property]; - } else { - p = { - value: "-", - rawvalue: "-", - source: "-", - }; - } - - if (typeof p === "object" && p !== null) { - // nothing, leave as is - } else { - p = { - value: p, - rawvalue: p, - }; - } - - res[property] = p; - } - - return res; + return this.normalizeProperties(response.body, properties); } if (response.statusCode == 404) { @@ -621,6 +627,14 @@ class Api { // https://github.com/truenas/middleware/pull/6934 // then use core.bulk to delete all + /** + * + * /usr/lib/python3/dist-packages/middlewared/plugins/replication.py + * 
readonly enum=["SET", "REQUIRE", "IGNORE"] + * + * @param {*} data + * @returns + */ async ReplicationRunOnetime(data) { const httpClient = await this.getHttpClient(false); @@ -652,6 +666,8 @@ class Api { // 200 means the 'job' was accepted only // must continue to check the status of the job to know when it has finished and if it was successful // /core/get_jobs [["id", "=", jobidhere]] + // state = SUCCESS/ABORTED/FAILED means finality has been reached + // state = RUNNING if (response.statusCode == 200) { return response.body; } From 97c1e01c1baebc7aa4e016c37114676d4634265d Mon Sep 17 00:00:00 2001 From: Travis Glenn Hansen Date: Wed, 23 Jun 2021 07:58:52 -0600 Subject: [PATCH 08/44] lock in search logic Signed-off-by: Travis Glenn Hansen --- src/driver/freenas/api.js | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/src/driver/freenas/api.js b/src/driver/freenas/api.js index 4e15a25..5da068b 100644 --- a/src/driver/freenas/api.js +++ b/src/driver/freenas/api.js @@ -1581,6 +1581,7 @@ class FreeNASApiDriver extends CsiBaseDriver { } async removeSnapshotsFromDatatset(datasetName, options = {}) { + // TODO: alter the logic here to not be n+1 const httpClient = await this.getHttpClient(); const httpApiClient = await this.getTrueNASHttpApiClient(); @@ -2132,7 +2133,9 @@ class FreeNASApiDriver extends CsiBaseDriver { zb.helpers.extractDatasetName(fullSnapshotName), ], target_dataset: datasetName, - name_regex: zb.helpers.extractSnapshotName(fullSnapshotName), + name_regex: `^${zb.helpers.extractSnapshotName( + fullSnapshotName + )}$`, recursive: false, retention_policy: "NONE", readonly: "IGNORE", @@ -2277,7 +2280,9 @@ class FreeNASApiDriver extends CsiBaseDriver { zb.helpers.extractDatasetName(fullSnapshotName), ], target_dataset: datasetName, - name_regex: zb.helpers.extractSnapshotName(fullSnapshotName), + name_regex: `^${zb.helpers.extractSnapshotName( + fullSnapshotName + )}$`, recursive: false, retention_policy: "NONE", 
readonly: "IGNORE", @@ -3160,6 +3165,7 @@ class FreeNASApiDriver extends CsiBaseDriver { throw new Error("dataset does not exist"); } else if (response.statusCode == 200) { for (let snapshot of response.body.snapshots) { + // TODO: alter the logic here to not be n+1 let i_response = await httpApiClient.SnapshotGet( snapshot.name, zfsProperties @@ -3187,6 +3193,7 @@ class FreeNASApiDriver extends CsiBaseDriver { } else if (response.statusCode == 200) { for (let child of response.body.children) { for (let snapshot of child.snapshots) { + // TODO: alter the logic here to not be n+1 let i_response = await httpApiClient.SnapshotGet( snapshot.name, zfsProperties @@ -3259,6 +3266,7 @@ class FreeNASApiDriver extends CsiBaseDriver { } else if (response.statusCode == 200) { for (let child of response.body.children) { for (let grandchild of child.children) { + // TODO: ask for full snapshot properties to be returned in the above endpoint to avoid the n+1 logic here let i_response = httpApiClient.normalizeProperties( grandchild, zfsProperties @@ -3504,7 +3512,7 @@ class FreeNASApiDriver extends CsiBaseDriver { name; snapshotDatasetName = datasetName + "/" + name; - // create target dataset + // create target dataset parent await httpApiClient.DatasetCreate(datasetName, { create_ancestors: true, }); @@ -3530,7 +3538,7 @@ class FreeNASApiDriver extends CsiBaseDriver { transport: "LOCAL", source_datasets: [zb.helpers.extractDatasetName(tmpSnapshotName)], target_dataset: snapshotDatasetName, - name_regex: zb.helpers.extractSnapshotName(tmpSnapshotName), + name_regex: `^${zb.helpers.extractSnapshotName(tmpSnapshotName)}$`, recursive: false, retention_policy: "NONE", readonly: "IGNORE", From 1f87ea8d776d9a5676b35be91d2ab7e80aff67cc Mon Sep 17 00:00:00 2001 From: Travis Glenn Hansen Date: Wed, 23 Jun 2021 08:34:49 -0600 Subject: [PATCH 09/44] proper exports for freenas ssh driver rename Signed-off-by: Travis Glenn Hansen --- src/driver/freenas/ssh.js | 4 ++-- 1 file changed, 2 
insertions(+), 2 deletions(-) diff --git a/src/driver/freenas/ssh.js b/src/driver/freenas/ssh.js index b7b07f4..605b873 100644 --- a/src/driver/freenas/ssh.js +++ b/src/driver/freenas/ssh.js @@ -18,7 +18,7 @@ const FREENAS_ISCSI_ASSETS_NAME_PROPERTY_NAME = // used for in-memory cache of the version info const FREENAS_SYSTEM_VERSION_CACHE_KEY = "freenas:system_version"; -class FreeNASDriver extends ControllerZfsSshBaseDriver { +class FreeNASSshDriver extends ControllerZfsSshBaseDriver { /** * cannot make this a storage class parameter as storage class/etc context is *not* sent * into various calls such as GetControllerCapabilities etc @@ -1839,4 +1839,4 @@ function IsJsonString(str) { return true; } -module.exports.FreeNASDriver = FreeNASDriver; +module.exports.FreeNASSshDriver = FreeNASSshDriver; From a26488ca1806dbd4ca1f49c7ad315f8f5039fb51 Mon Sep 17 00:00:00 2001 From: Travis Glenn Hansen Date: Wed, 23 Jun 2021 12:15:57 -0600 Subject: [PATCH 10/44] more robust handling of stale nfs filesystems Signed-off-by: Travis Glenn Hansen --- src/driver/index.js | 91 ++++++++++++++++++++++++++++++++++++----- src/utils/filesystem.js | 8 +++- src/utils/mount.js | 33 +++++++++++++-- 3 files changed, 117 insertions(+), 15 deletions(-) diff --git a/src/driver/index.js b/src/driver/index.js index cdb1519..74852c4 100644 --- a/src/driver/index.js +++ b/src/driver/index.js @@ -450,7 +450,8 @@ class CsiBaseDriver { // compare all device-mapper slaves with the newly created devices // if any of the new devices are device-mapper slaves treat this as a // multipath scenario - let allDeviceMapperSlaves = await filesystem.getAllDeviceMapperSlaveDevices(); + let allDeviceMapperSlaves = + await filesystem.getAllDeviceMapperSlaveDevices(); let commonDevices = allDeviceMapperSlaves.filter((value) => iscsiDevices.includes(value) ); @@ -581,6 +582,7 @@ class CsiBaseDriver { * @param {*} call */ async NodeUnstageVolume(call) { + const driver = this; const mount = new Mount(); const filesystem 
= new Filesystem(); const iscsi = new ISCSI(); @@ -606,7 +608,21 @@ class CsiBaseDriver { //result = await mount.pathIsMounted(block_path); //result = await mount.pathIsMounted(staging_target_path) - result = await mount.pathIsMounted(block_path); + try { + result = await mount.pathIsMounted(block_path); + } catch (err) { + /** + * on stalled fs such as nfs, even findmnt will return immediately for the base mount point + * so in the case of timeout here (base mount point and then a file/folder beneath it) we almost certainly are not a block device + * AND the fs is probably stalled + */ + if (err.timeout) { + result = false; + } else { + throw err; + } + } + if (result) { is_block = true; access_type = "block"; @@ -626,7 +642,32 @@ class CsiBaseDriver { result = await mount.pathIsMounted(normalized_staging_path); if (result) { - result = await mount.umount(normalized_staging_path, umount_args); + try { + result = await mount.umount(normalized_staging_path, umount_args); + } catch (err) { + if (err.timeout) { + driver.ctx.logger.warn( + `hit timeout waiting to unmount path: ${normalized_staging_path}` + ); + result = await mount.getMountDetails(normalized_staging_path); + switch (result.fstype) { + case "nfs": + driver.ctx.logger.warn( + `detected stale nfs filesystem, attempting to force unmount: ${normalized_staging_path}` + ); + result = await mount.umount( + normalized_staging_path, + umount_args.concat(["--force", "--lazy"]) + ); + break; + default: + throw err; + break; + } + } else { + throw err; + } + } } if (is_block) { @@ -666,14 +707,13 @@ class CsiBaseDriver { session.attached_scsi_devices.host && session.attached_scsi_devices.host.devices ) { - is_attached_to_session = session.attached_scsi_devices.host.devices.some( - (device) => { + is_attached_to_session = + session.attached_scsi_devices.host.devices.some((device) => { if (device.attached_scsi_disk == block_device_info_i.name) { return true; } return false; - } - ); + }); } if 
(is_attached_to_session) { @@ -864,6 +904,7 @@ class CsiBaseDriver { } async NodeUnpublishVolume(call) { + const driver = this; const mount = new Mount(); const filesystem = new Filesystem(); let result; @@ -874,7 +915,33 @@ class CsiBaseDriver { result = await mount.pathIsMounted(target_path); if (result) { - result = await mount.umount(target_path, umount_args); + try { + result = await mount.umount(target_path, umount_args); + } catch (err) { + if (err.timeout) { + driver.ctx.logger.warn( + `hit timeout waiting to unmount path: ${target_path}` + ); + // bind mounts do show the 'real' fs details + result = await mount.getMountDetails(target_path); + switch (result.fstype) { + case "nfs": + driver.ctx.logger.warn( + `detected stale nfs filesystem, attempting to force unmount: ${target_path}` + ); + result = await mount.umount( + target_path, + umount_args.concat(["--force", "--lazy"]) + ); + break; + default: + throw err; + break; + } + } else { + throw err; + } + } } result = await filesystem.pathExists(target_path); @@ -909,7 +976,7 @@ class CsiBaseDriver { //VOLUME_CONDITION if ( semver.satisfies(driver.ctx.csiVersion, ">=1.3.0") && - options.service.node.capabilities.rpc.includes("VOLUME_CONDITION") + driver.options.service.node.capabilities.rpc.includes("VOLUME_CONDITION") ) { // TODO: let drivers fill ths in let abnormal = false; @@ -930,7 +997,11 @@ class CsiBaseDriver { switch (access_type) { case "mount": - result = await mount.getMountDetails(device_path); + result = await mount.getMountDetails(device_path, [ + "avail", + "size", + "used", + ]); res.usage = [ { diff --git a/src/utils/filesystem.js b/src/utils/filesystem.js index 7bb3684..53d8d99 100644 --- a/src/utils/filesystem.js +++ b/src/utils/filesystem.js @@ -614,10 +614,16 @@ class Filesystem { }); child.on("close", function (code) { - const result = { code, stdout, stderr }; + const result = { code, stdout, stderr, timeout: false }; if (timeout) { clearTimeout(timeout); } + + if (code === null) { 
+ result.timeout = true; + reject(result); + } + if (code) { console.log( "failed to execute filesystem command: %s, response: %j", diff --git a/src/utils/mount.js b/src/utils/mount.js index 9058090..1b1b6ba 100644 --- a/src/utils/mount.js +++ b/src/utils/mount.js @@ -1,14 +1,17 @@ const cp = require("child_process"); const { Filesystem } = require("../utils/filesystem"); +// avoid using avail,size,used as it causes hangs when the fs is stale FINDMNT_COMMON_OPTIONS = [ "--output", - "source,target,fstype,label,options,avail,size,used", + "source,target,fstype,label,options", "-b", "-J", "--nofsroot", // prevents unwanted behavior with cifs volumes ]; +DEFAUT_TIMEOUT = 30000; + class Mount { constructor(options = {}) { const mount = this; @@ -142,9 +145,15 @@ class Mount { * * @param {*} path */ - async getMountDetails(path) { + async getMountDetails(path, extraOutputProperties = []) { const mount = this; let args = []; + const common_options = FINDMNT_COMMON_OPTIONS; + if (extraOutputProperties.length > 0) { + common_options[1] = + common_options[1] + "," + extraOutputProperties.join(","); + } + args = args.concat(["--mountpoint", path]); args = args.concat(FINDMNT_COMMON_OPTIONS); let result; @@ -279,7 +288,11 @@ class Mount { return true; } - exec(command, args, options) { + exec(command, args, options = {}) { + if (!options.hasOwnProperty("timeout")) { + options.timeout = DEFAUT_TIMEOUT; + } + const mount = this; args = args || []; @@ -303,6 +316,10 @@ class Mount { console.log("executing mount command: %s", cleansedLog); const child = mount.options.executor.spawn(command, args, options); + /** + * timeout option natively supported since v16 + * TODO: properly handle this based on nodejs version + */ let didTimeout = false; if (options && options.timeout) { timeout = setTimeout(() => { @@ -321,10 +338,18 @@ class Mount { }); child.on("close", function (code) { - const result = { code, stdout, stderr }; + const result = { code, stdout, stderr, timeout: false }; + 
if (timeout) { clearTimeout(timeout); } + + // timeout scenario + if (code === null) { + result.timeout = true; + reject(result); + } + if (code) { reject(result); } else { From 185d487d63dd8c09d7f075d90b3452fdff14b0da Mon Sep 17 00:00:00 2001 From: Travis Glenn Hansen Date: Wed, 23 Jun 2021 15:35:14 -0600 Subject: [PATCH 11/44] bump deps Signed-off-by: Travis Glenn Hansen --- package-lock.json | 375 ++++++++++++++++++++++------------------------ package.json | 2 +- 2 files changed, 181 insertions(+), 196 deletions(-) diff --git a/package-lock.json b/package-lock.json index 57c89bf..6363d4d 100644 --- a/package-lock.json +++ b/package-lock.json @@ -16,7 +16,7 @@ "lru-cache": "^6.0.0", "request": "^2.88.2", "semver": "^7.3.4", - "ssh2": "^0.8.9", + "ssh2": "^1.1.0", "uri-js": "^4.4.1", "uuid": "^8.3.2", "winston": "^3.3.3", @@ -36,20 +36,26 @@ } }, "node_modules/@babel/helper-validator-identifier": { - "version": "7.14.0", - "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.14.0.tgz", - "integrity": "sha512-V3ts7zMSu5lfiwWDVWzRDGIN+lnCEUdaXgtVHJgLb1rGaA6jMrtB9EmE7L18foXJIE8Un/A/h6NJfGQp/e1J4A==", - "dev": true + "version": "7.14.5", + "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.14.5.tgz", + "integrity": "sha512-5lsetuxCLilmVGyiLEfoHBRX8UCFD+1m2x3Rj97WrW3V7H3u4RWRXA4evMjImCsin2J2YT0QaVDGf+z8ondbAg==", + "dev": true, + "engines": { + "node": ">=6.9.0" + } }, "node_modules/@babel/highlight": { - "version": "7.14.0", - "resolved": "https://registry.npmjs.org/@babel/highlight/-/highlight-7.14.0.tgz", - "integrity": "sha512-YSCOwxvTYEIMSGaBQb5kDDsCopDdiUGsqpatp3fOlI4+2HQSkTmEVWnVuySdAC5EWCqSWWTv0ib63RjR7dTBdg==", + "version": "7.14.5", + "resolved": "https://registry.npmjs.org/@babel/highlight/-/highlight-7.14.5.tgz", + "integrity": "sha512-qf9u2WFWVV0MppaL877j2dBtQIDgmidgjGk5VIMw3OadXvYaXn66U1BFlH2t4+t3i+8PhedppRv+i40ABzd+gg==", "dev": true, 
"dependencies": { - "@babel/helper-validator-identifier": "^7.14.0", + "@babel/helper-validator-identifier": "^7.14.5", "chalk": "^2.0.0", "js-tokens": "^4.0.0" + }, + "engines": { + "node": ">=6.9.0" } }, "node_modules/@babel/highlight/node_modules/ansi-styles": { @@ -93,6 +99,15 @@ "integrity": "sha1-p9BVi9icQveV3UIyj3QIMcpTvCU=", "dev": true }, + "node_modules/@babel/highlight/node_modules/escape-string-regexp": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz", + "integrity": "sha1-G2HAViGQqN/2rjuyzwIAyhMLhtQ=", + "dev": true, + "engines": { + "node": ">=0.8.0" + } + }, "node_modules/@babel/highlight/node_modules/has-flag": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz", @@ -125,15 +140,15 @@ } }, "node_modules/@eslint/eslintrc": { - "version": "0.4.1", - "resolved": "https://registry.npmjs.org/@eslint/eslintrc/-/eslintrc-0.4.1.tgz", - "integrity": "sha512-5v7TDE9plVhvxQeWLXDTvFvJBdH6pEsdnl2g/dAptmuFEPedQ4Erq5rsDsX+mvAM610IhNaO2W5V1dOOnDKxkQ==", + "version": "0.4.2", + "resolved": "https://registry.npmjs.org/@eslint/eslintrc/-/eslintrc-0.4.2.tgz", + "integrity": "sha512-8nmGq/4ycLpIwzvhI4tNDmQztZ8sp+hI7cyG8i1nQDhkAbRzHpXPidRAHlNvCZQpJTKw5ItIpMw9RSToGF00mg==", "dev": true, "dependencies": { "ajv": "^6.12.4", "debug": "^4.1.1", "espree": "^7.3.0", - "globals": "^12.1.0", + "globals": "^13.9.0", "ignore": "^4.0.6", "import-fresh": "^3.2.1", "js-yaml": "^3.13.1", @@ -153,21 +168,6 @@ "sprintf-js": "~1.0.2" } }, - "node_modules/@eslint/eslintrc/node_modules/globals": { - "version": "12.4.0", - "resolved": "https://registry.npmjs.org/globals/-/globals-12.4.0.tgz", - "integrity": "sha512-BWICuzzDvDoH54NHKCseDanAhE3CeDorgDL5MT6LMXXj2WCnd9UC2szdk4AWLfjdgNBCXLUanXYcpBBKOSWGwg==", - "dev": true, - "dependencies": { - "type-fest": "^0.8.1" - }, - "engines": { - "node": ">=8" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, 
"node_modules/@eslint/eslintrc/node_modules/js-yaml": { "version": "3.14.1", "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.14.1.tgz", @@ -181,15 +181,6 @@ "js-yaml": "bin/js-yaml.js" } }, - "node_modules/@eslint/eslintrc/node_modules/type-fest": { - "version": "0.8.1", - "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.8.1.tgz", - "integrity": "sha512-4dbzIzqvjtgiM5rw1k5rEHtBANKmdudhGyBEajN01fEyhaAIhsoKNy6y7+IN93IfpFtwY9iqi7kD+xwKhQsNJA==", - "dev": true, - "engines": { - "node": ">=8" - } - }, "node_modules/@grpc/proto-loader": { "version": "0.6.2", "resolved": "https://registry.npmjs.org/@grpc/proto-loader/-/proto-loader-0.6.2.tgz", @@ -285,9 +276,9 @@ "integrity": "sha512-5tXH6Bx/kNGd3MgffdmP4dy2Z+G4eaXw0SE81Tq3BNadtnMR5/ySMzX4SLEzHJzSmPNn4HIdpQsBvXMUykr58w==" }, "node_modules/@types/node": { - "version": "15.0.3", - "resolved": "https://registry.npmjs.org/@types/node/-/node-15.0.3.tgz", - "integrity": "sha512-/WbxFeBU+0F79z9RdEOXH4CsDga+ibi5M8uEYr91u3CkT/pdWcV8MCook+4wDPnZBexRdwWS+PiVZ2xJviAzcQ==" + "version": "15.12.4", + "resolved": "https://registry.npmjs.org/@types/node/-/node-15.12.4.tgz", + "integrity": "sha512-zrNj1+yqYF4WskCMOHwN+w9iuD12+dGm0rQ35HLl9/Ouuq52cEtd0CH9qMgrdNmi5ejC1/V7vKEXYubB+65DkA==" }, "node_modules/acorn": { "version": "7.4.1", @@ -620,6 +611,19 @@ "resolved": "https://registry.npmjs.org/core-util-is/-/core-util-is-1.0.2.tgz", "integrity": "sha1-tf1UIgqivFq1eqtxQMlAdUUDwac=" }, + "node_modules/cpu-features": { + "version": "0.0.2", + "resolved": "https://registry.npmjs.org/cpu-features/-/cpu-features-0.0.2.tgz", + "integrity": "sha512-/2yieBqvMcRj8McNzkycjW2v3OIUOibBfd2dLEJ0nWts8NobAxwiyw9phVNS6oDL8x8tz9F7uNVFEVpJncQpeA==", + "hasInstallScript": true, + "optional": true, + "dependencies": { + "nan": "^2.14.1" + }, + "engines": { + "node": ">=8.0.0" + } + }, "node_modules/cross-spawn": { "version": "7.0.3", "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.3.tgz", @@ -749,37 +753,42 @@ } }, 
"node_modules/escape-string-regexp": { - "version": "1.0.5", - "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz", - "integrity": "sha1-G2HAViGQqN/2rjuyzwIAyhMLhtQ=", + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-4.0.0.tgz", + "integrity": "sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA==", "dev": true, "engines": { - "node": ">=0.8.0" + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" } }, "node_modules/eslint": { - "version": "7.26.0", - "resolved": "https://registry.npmjs.org/eslint/-/eslint-7.26.0.tgz", - "integrity": "sha512-4R1ieRf52/izcZE7AlLy56uIHHDLT74Yzz2Iv2l6kDaYvEu9x+wMB5dZArVL8SYGXSYV2YAg70FcW5Y5nGGNIg==", + "version": "7.29.0", + "resolved": "https://registry.npmjs.org/eslint/-/eslint-7.29.0.tgz", + "integrity": "sha512-82G/JToB9qIy/ArBzIWG9xvvwL3R86AlCjtGw+A29OMZDqhTybz/MByORSukGxeI+YPCR4coYyITKk8BFH9nDA==", "dev": true, "dependencies": { "@babel/code-frame": "7.12.11", - "@eslint/eslintrc": "^0.4.1", + "@eslint/eslintrc": "^0.4.2", "ajv": "^6.10.0", "chalk": "^4.0.0", "cross-spawn": "^7.0.2", "debug": "^4.0.1", "doctrine": "^3.0.0", "enquirer": "^2.3.5", + "escape-string-regexp": "^4.0.0", "eslint-scope": "^5.1.1", "eslint-utils": "^2.1.0", "eslint-visitor-keys": "^2.0.0", "espree": "^7.3.1", "esquery": "^1.4.0", "esutils": "^2.0.2", + "fast-deep-equal": "^3.1.3", "file-entry-cache": "^6.0.1", "functional-red-black-tree": "^1.0.1", - "glob-parent": "^5.0.0", + "glob-parent": "^5.1.2", "globals": "^13.6.0", "ignore": "^4.0.6", "import-fresh": "^3.0.0", @@ -788,7 +797,7 @@ "js-yaml": "^3.13.1", "json-stable-stringify-without-jsonify": "^1.0.1", "levn": "^0.4.1", - "lodash": "^4.17.21", + "lodash.merge": "^4.6.2", "minimatch": "^3.0.4", "natural-compare": "^1.4.0", "optionator": "^0.9.1", @@ -797,7 +806,7 @@ "semver": "^7.2.1", "strip-ansi": "^6.0.0", 
"strip-json-comments": "^3.1.0", - "table": "^6.0.4", + "table": "^6.0.9", "text-table": "^0.2.0", "v8-compile-cache": "^2.0.3" }, @@ -1132,9 +1141,9 @@ } }, "node_modules/globals": { - "version": "13.8.0", - "resolved": "https://registry.npmjs.org/globals/-/globals-13.8.0.tgz", - "integrity": "sha512-rHtdA6+PDBIjeEvA91rpqzEvk/k3/i7EeNQiryiWuJH0Hw9cpyJMAt2jtbAwUaRdhD+573X4vWw6IcjKPasi9Q==", + "version": "13.9.0", + "resolved": "https://registry.npmjs.org/globals/-/globals-13.9.0.tgz", + "integrity": "sha512-74/FduwI/JaIrr1H8e71UbDE+5x7pIPs1C2rrwC52SszOo043CsWOZEMW7o2Y58xwm9b+0RBKDxY5n2sUpEFxA==", "dev": true, "dependencies": { "type-fest": "^0.20.2" @@ -2045,12 +2054,6 @@ "node": ">= 0.8.0" } }, - "node_modules/lodash": { - "version": "4.17.21", - "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.21.tgz", - "integrity": "sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==", - "dev": true - }, "node_modules/lodash.camelcase": { "version": "4.3.0", "resolved": "https://registry.npmjs.org/lodash.camelcase/-/lodash.camelcase-4.3.0.tgz", @@ -2067,6 +2070,12 @@ "integrity": "sha1-4j8/nE+Pvd6HJSnBBxhXoIblzO8=", "dev": true }, + "node_modules/lodash.merge": { + "version": "4.6.2", + "resolved": "https://registry.npmjs.org/lodash.merge/-/lodash.merge-4.6.2.tgz", + "integrity": "sha512-0KpjqXRVvrYyCsX1swR/XTK0va6VQkQM6MNo7PqW77ByjAhoARA8EfrP1N4+KlKj8YS0ZUCtRT/YUuhyYDujIQ==", + "dev": true + }, "node_modules/lodash.truncate": { "version": "4.4.2", "resolved": "https://registry.npmjs.org/lodash.truncate/-/lodash.truncate-4.4.2.tgz", @@ -2102,19 +2111,19 @@ } }, "node_modules/mime-db": { - "version": "1.47.0", - "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.47.0.tgz", - "integrity": "sha512-QBmA/G2y+IfeS4oktet3qRZ+P5kPhCKRXxXnQEudYqUaEioAU1/Lq2us3D/t1Jfo4hE9REQPrbB7K5sOczJVIw==", + "version": "1.48.0", + "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.48.0.tgz", + "integrity": 
"sha512-FM3QwxV+TnZYQ2aRqhlKBMHxk10lTbMt3bBkMAp54ddrNeVSfcQYOOKuGuy3Ddrm38I04If834fOUSq1yzslJQ==", "engines": { "node": ">= 0.6" } }, "node_modules/mime-types": { - "version": "2.1.30", - "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.30.tgz", - "integrity": "sha512-crmjA4bLtR8m9qLpHvgxSChT+XoSlZi8J4n/aIdn3z92e/U47Z0V/yl+Wh9W046GgFVAmoNR/fmdbZYcSSIUeg==", + "version": "2.1.31", + "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.31.tgz", + "integrity": "sha512-XGZnNzm3QvgKxa8dpzyhFTHmpP3l5YNusmne07VUOXxou9CqUqYa/HBy124RqtVh/O2pECas/MOcsDgpilPOPg==", "dependencies": { - "mime-db": "1.47.0" + "mime-db": "1.48.0" }, "engines": { "node": ">= 0.6" @@ -2406,9 +2415,9 @@ } }, "node_modules/regexpp": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/regexpp/-/regexpp-3.1.0.tgz", - "integrity": "sha512-ZOIzd8yVsQQA7j8GCSlPGXwg5PfmA1mrq0JP4nGhh54LaKN3xdai/vHUDu74pKwV8OxseMS65u2NImosQcSD0Q==", + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/regexpp/-/regexpp-3.2.0.tgz", + "integrity": "sha512-pq2bWo9mVD43nbts2wGv17XLiNLya+GklZ8kaDLV2Z08gDCsGpnKn9BFMepvWuHCbyVvY7J5o5+BVvoQbmlJLg==", "dev": true, "engines": { "node": ">=8" @@ -2452,6 +2461,7 @@ "version": "3.4.0", "resolved": "https://registry.npmjs.org/uuid/-/uuid-3.4.0.tgz", "integrity": "sha512-HjSDRw6gZE5JMggctHBcjVak08+KEVhSIiDzFnT9S9aegmp85S/bReBVTb4QTFaRNptJ9kuYaNhnbNEOkbKb/A==", + "deprecated": "Please upgrade to version 7 or higher. Older versions may use Math.random() in certain circumstances, which is known to be problematic. 
See https://v8.dev/blog/math-random for details.", "bin": { "uuid": "bin/uuid" } @@ -2602,27 +2612,20 @@ "dev": true }, "node_modules/ssh2": { - "version": "0.8.9", - "resolved": "https://registry.npmjs.org/ssh2/-/ssh2-0.8.9.tgz", - "integrity": "sha512-GmoNPxWDMkVpMFa9LVVzQZHF6EW3WKmBwL+4/GeILf2hFmix5Isxm7Amamo8o7bHiU0tC+wXsGcUXOxp8ChPaw==", + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/ssh2/-/ssh2-1.1.0.tgz", + "integrity": "sha512-CidQLG2ZacoT0Z7O6dOyisj4JdrOrLVJ4KbHjVNz9yI1vO08FAYQPcnkXY9BP8zeYo+J/nBgY6Gg4R7w4WFWtg==", + "hasInstallScript": true, "dependencies": { - "ssh2-streams": "~0.4.10" + "asn1": "^0.2.4", + "bcrypt-pbkdf": "^1.0.2" }, "engines": { - "node": ">=5.2.0" - } - }, - "node_modules/ssh2-streams": { - "version": "0.4.10", - "resolved": "https://registry.npmjs.org/ssh2-streams/-/ssh2-streams-0.4.10.tgz", - "integrity": "sha512-8pnlMjvnIZJvmTzUIIA5nT4jr2ZWNNVHwyXfMGdRJbug9TpI3kd99ffglgfSWqujVv/0gxwMsDn9j9RVst8yhQ==", - "dependencies": { - "asn1": "~0.2.0", - "bcrypt-pbkdf": "^1.0.2", - "streamsearch": "~0.1.2" + "node": ">=10.16.0" }, - "engines": { - "node": ">=5.2.0" + "optionalDependencies": { + "cpu-features": "0.0.2", + "nan": "^2.14.2" } }, "node_modules/sshpk": { @@ -2657,14 +2660,6 @@ "node": "*" } }, - "node_modules/streamsearch": { - "version": "0.1.2", - "resolved": "https://registry.npmjs.org/streamsearch/-/streamsearch-0.1.2.tgz", - "integrity": "sha1-gIudDlb8Jz2Am6VzOOkpkZoanxo=", - "engines": { - "node": ">=0.8.0" - } - }, "node_modules/string_decoder": { "version": "1.3.0", "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.3.0.tgz", @@ -2722,9 +2717,9 @@ } }, "node_modules/table": { - "version": "6.7.0", - "resolved": "https://registry.npmjs.org/table/-/table-6.7.0.tgz", - "integrity": "sha512-SAM+5p6V99gYiiy2gT5ArdzgM1dLDed0nkrWmG6Fry/bUS/m9x83BwpJUOf1Qj/x2qJd+thL6IkIx7qPGRxqBw==", + "version": "6.7.1", + "resolved": "https://registry.npmjs.org/table/-/table-6.7.1.tgz", + "integrity": 
"sha512-ZGum47Yi6KOOFDE8m223td53ath2enHcYLgOCjGr5ngu8bdIARQk6mN/wRMv4yMRcHnCSnHbCEha4sobQx5yWg==", "dev": true, "dependencies": { "ajv": "^8.0.1", @@ -2739,9 +2734,9 @@ } }, "node_modules/table/node_modules/ajv": { - "version": "8.3.0", - "resolved": "https://registry.npmjs.org/ajv/-/ajv-8.3.0.tgz", - "integrity": "sha512-RYE7B5An83d7eWnDR8kbdaIFqmKCNsP16ay1hDbJEU+sa0e3H9SebskCt0Uufem6cfAVu7Col6ubcn/W+Sm8/Q==", + "version": "8.6.0", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-8.6.0.tgz", + "integrity": "sha512-cnUG4NSBiM4YFBxgZIj/In3/6KX+rQ2l2YPRVcvAMQGWEPKuXoPIhxzwqh31jA3IPbI4qEOp/5ILI4ynioXsGQ==", "dev": true, "dependencies": { "fast-deep-equal": "^3.1.1", @@ -2829,9 +2824,9 @@ } }, "node_modules/uglify-js": { - "version": "3.13.6", - "resolved": "https://registry.npmjs.org/uglify-js/-/uglify-js-3.13.6.tgz", - "integrity": "sha512-rRprLwl8RVaS+Qvx3Wh5hPfPBn9++G6xkGlUupya0s5aDmNjI7z3lnRLB3u7sN4OmbB0pWgzhM9BEJyiWAwtAA==", + "version": "3.13.9", + "resolved": "https://registry.npmjs.org/uglify-js/-/uglify-js-3.13.9.tgz", + "integrity": "sha512-wZbyTQ1w6Y7fHdt8sJnHfSIuWeDgk6B5rCb4E/AM6QNNPbOMIZph21PW5dRB3h7Df0GszN+t7RuUH6sWK5bF0g==", "optional": true, "bin": { "uglifyjs": "bin/uglifyjs" @@ -3031,9 +3026,9 @@ } }, "node_modules/yargs-parser": { - "version": "20.2.7", - "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-20.2.7.tgz", - "integrity": "sha512-FiNkvbeHzB/syOjIUxFDCnhSfzAL8R5vs40MgLFBorXACCOAEaWu0gRZl14vG8MR9AOJIZbmkjhusqBYZ3HTHw==", + "version": "20.2.9", + "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-20.2.9.tgz", + "integrity": "sha512-y11nGElTIV+CT3Zv9t7VKl+Q3hTQoT9a1Qzezhhl6Rp21gJ/IVTW7Z3y9EWXhuUBC2Shnf+DX0antecpAwSP8w==", "engines": { "node": ">=10" } @@ -3050,18 +3045,18 @@ } }, "@babel/helper-validator-identifier": { - "version": "7.14.0", - "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.14.0.tgz", - "integrity": 
"sha512-V3ts7zMSu5lfiwWDVWzRDGIN+lnCEUdaXgtVHJgLb1rGaA6jMrtB9EmE7L18foXJIE8Un/A/h6NJfGQp/e1J4A==", + "version": "7.14.5", + "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.14.5.tgz", + "integrity": "sha512-5lsetuxCLilmVGyiLEfoHBRX8UCFD+1m2x3Rj97WrW3V7H3u4RWRXA4evMjImCsin2J2YT0QaVDGf+z8ondbAg==", "dev": true }, "@babel/highlight": { - "version": "7.14.0", - "resolved": "https://registry.npmjs.org/@babel/highlight/-/highlight-7.14.0.tgz", - "integrity": "sha512-YSCOwxvTYEIMSGaBQb5kDDsCopDdiUGsqpatp3fOlI4+2HQSkTmEVWnVuySdAC5EWCqSWWTv0ib63RjR7dTBdg==", + "version": "7.14.5", + "resolved": "https://registry.npmjs.org/@babel/highlight/-/highlight-7.14.5.tgz", + "integrity": "sha512-qf9u2WFWVV0MppaL877j2dBtQIDgmidgjGk5VIMw3OadXvYaXn66U1BFlH2t4+t3i+8PhedppRv+i40ABzd+gg==", "dev": true, "requires": { - "@babel/helper-validator-identifier": "^7.14.0", + "@babel/helper-validator-identifier": "^7.14.5", "chalk": "^2.0.0", "js-tokens": "^4.0.0" }, @@ -3101,6 +3096,12 @@ "integrity": "sha1-p9BVi9icQveV3UIyj3QIMcpTvCU=", "dev": true }, + "escape-string-regexp": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz", + "integrity": "sha1-G2HAViGQqN/2rjuyzwIAyhMLhtQ=", + "dev": true + }, "has-flag": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz", @@ -3129,15 +3130,15 @@ } }, "@eslint/eslintrc": { - "version": "0.4.1", - "resolved": "https://registry.npmjs.org/@eslint/eslintrc/-/eslintrc-0.4.1.tgz", - "integrity": "sha512-5v7TDE9plVhvxQeWLXDTvFvJBdH6pEsdnl2g/dAptmuFEPedQ4Erq5rsDsX+mvAM610IhNaO2W5V1dOOnDKxkQ==", + "version": "0.4.2", + "resolved": "https://registry.npmjs.org/@eslint/eslintrc/-/eslintrc-0.4.2.tgz", + "integrity": "sha512-8nmGq/4ycLpIwzvhI4tNDmQztZ8sp+hI7cyG8i1nQDhkAbRzHpXPidRAHlNvCZQpJTKw5ItIpMw9RSToGF00mg==", "dev": true, "requires": { "ajv": "^6.12.4", "debug": "^4.1.1", "espree": "^7.3.0", - 
"globals": "^12.1.0", + "globals": "^13.9.0", "ignore": "^4.0.6", "import-fresh": "^3.2.1", "js-yaml": "^3.13.1", @@ -3154,15 +3155,6 @@ "sprintf-js": "~1.0.2" } }, - "globals": { - "version": "12.4.0", - "resolved": "https://registry.npmjs.org/globals/-/globals-12.4.0.tgz", - "integrity": "sha512-BWICuzzDvDoH54NHKCseDanAhE3CeDorgDL5MT6LMXXj2WCnd9UC2szdk4AWLfjdgNBCXLUanXYcpBBKOSWGwg==", - "dev": true, - "requires": { - "type-fest": "^0.8.1" - } - }, "js-yaml": { "version": "3.14.1", "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.14.1.tgz", @@ -3172,12 +3164,6 @@ "argparse": "^1.0.7", "esprima": "^4.0.0" } - }, - "type-fest": { - "version": "0.8.1", - "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.8.1.tgz", - "integrity": "sha512-4dbzIzqvjtgiM5rw1k5rEHtBANKmdudhGyBEajN01fEyhaAIhsoKNy6y7+IN93IfpFtwY9iqi7kD+xwKhQsNJA==", - "dev": true } } }, @@ -3269,9 +3255,9 @@ "integrity": "sha512-5tXH6Bx/kNGd3MgffdmP4dy2Z+G4eaXw0SE81Tq3BNadtnMR5/ySMzX4SLEzHJzSmPNn4HIdpQsBvXMUykr58w==" }, "@types/node": { - "version": "15.0.3", - "resolved": "https://registry.npmjs.org/@types/node/-/node-15.0.3.tgz", - "integrity": "sha512-/WbxFeBU+0F79z9RdEOXH4CsDga+ibi5M8uEYr91u3CkT/pdWcV8MCook+4wDPnZBexRdwWS+PiVZ2xJviAzcQ==" + "version": "15.12.4", + "resolved": "https://registry.npmjs.org/@types/node/-/node-15.12.4.tgz", + "integrity": "sha512-zrNj1+yqYF4WskCMOHwN+w9iuD12+dGm0rQ35HLl9/Ouuq52cEtd0CH9qMgrdNmi5ejC1/V7vKEXYubB+65DkA==" }, "acorn": { "version": "7.4.1", @@ -3539,6 +3525,15 @@ "resolved": "https://registry.npmjs.org/core-util-is/-/core-util-is-1.0.2.tgz", "integrity": "sha1-tf1UIgqivFq1eqtxQMlAdUUDwac=" }, + "cpu-features": { + "version": "0.0.2", + "resolved": "https://registry.npmjs.org/cpu-features/-/cpu-features-0.0.2.tgz", + "integrity": "sha512-/2yieBqvMcRj8McNzkycjW2v3OIUOibBfd2dLEJ0nWts8NobAxwiyw9phVNS6oDL8x8tz9F7uNVFEVpJncQpeA==", + "optional": true, + "requires": { + "nan": "^2.14.1" + } + }, "cross-spawn": { "version": "7.0.3", "resolved": 
"https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.3.tgz", @@ -3635,34 +3630,36 @@ "integrity": "sha512-k0er2gUkLf8O0zKJiAhmkTnJlTvINGv7ygDNPbeIsX/TJjGJZHuh9B2UxbsaEkmlEo9MfhrSzmhIlhRlI2GXnw==" }, "escape-string-regexp": { - "version": "1.0.5", - "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz", - "integrity": "sha1-G2HAViGQqN/2rjuyzwIAyhMLhtQ=", + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-4.0.0.tgz", + "integrity": "sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA==", "dev": true }, "eslint": { - "version": "7.26.0", - "resolved": "https://registry.npmjs.org/eslint/-/eslint-7.26.0.tgz", - "integrity": "sha512-4R1ieRf52/izcZE7AlLy56uIHHDLT74Yzz2Iv2l6kDaYvEu9x+wMB5dZArVL8SYGXSYV2YAg70FcW5Y5nGGNIg==", + "version": "7.29.0", + "resolved": "https://registry.npmjs.org/eslint/-/eslint-7.29.0.tgz", + "integrity": "sha512-82G/JToB9qIy/ArBzIWG9xvvwL3R86AlCjtGw+A29OMZDqhTybz/MByORSukGxeI+YPCR4coYyITKk8BFH9nDA==", "dev": true, "requires": { "@babel/code-frame": "7.12.11", - "@eslint/eslintrc": "^0.4.1", + "@eslint/eslintrc": "^0.4.2", "ajv": "^6.10.0", "chalk": "^4.0.0", "cross-spawn": "^7.0.2", "debug": "^4.0.1", "doctrine": "^3.0.0", "enquirer": "^2.3.5", + "escape-string-regexp": "^4.0.0", "eslint-scope": "^5.1.1", "eslint-utils": "^2.1.0", "eslint-visitor-keys": "^2.0.0", "espree": "^7.3.1", "esquery": "^1.4.0", "esutils": "^2.0.2", + "fast-deep-equal": "^3.1.3", "file-entry-cache": "^6.0.1", "functional-red-black-tree": "^1.0.1", - "glob-parent": "^5.0.0", + "glob-parent": "^5.1.2", "globals": "^13.6.0", "ignore": "^4.0.6", "import-fresh": "^3.0.0", @@ -3671,7 +3668,7 @@ "js-yaml": "^3.13.1", "json-stable-stringify-without-jsonify": "^1.0.1", "levn": "^0.4.1", - "lodash": "^4.17.21", + "lodash.merge": "^4.6.2", "minimatch": "^3.0.4", "natural-compare": "^1.4.0", "optionator": "^0.9.1", @@ -3680,7 +3677,7 @@ 
"semver": "^7.2.1", "strip-ansi": "^6.0.0", "strip-json-comments": "^3.1.0", - "table": "^6.0.4", + "table": "^6.0.9", "text-table": "^0.2.0", "v8-compile-cache": "^2.0.3" }, @@ -3940,9 +3937,9 @@ } }, "globals": { - "version": "13.8.0", - "resolved": "https://registry.npmjs.org/globals/-/globals-13.8.0.tgz", - "integrity": "sha512-rHtdA6+PDBIjeEvA91rpqzEvk/k3/i7EeNQiryiWuJH0Hw9cpyJMAt2jtbAwUaRdhD+573X4vWw6IcjKPasi9Q==", + "version": "13.9.0", + "resolved": "https://registry.npmjs.org/globals/-/globals-13.9.0.tgz", + "integrity": "sha512-74/FduwI/JaIrr1H8e71UbDE+5x7pIPs1C2rrwC52SszOo043CsWOZEMW7o2Y58xwm9b+0RBKDxY5n2sUpEFxA==", "dev": true, "requires": { "type-fest": "^0.20.2" @@ -4625,12 +4622,6 @@ "type-check": "~0.4.0" } }, - "lodash": { - "version": "4.17.21", - "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.21.tgz", - "integrity": "sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==", - "dev": true - }, "lodash.camelcase": { "version": "4.3.0", "resolved": "https://registry.npmjs.org/lodash.camelcase/-/lodash.camelcase-4.3.0.tgz", @@ -4647,6 +4638,12 @@ "integrity": "sha1-4j8/nE+Pvd6HJSnBBxhXoIblzO8=", "dev": true }, + "lodash.merge": { + "version": "4.6.2", + "resolved": "https://registry.npmjs.org/lodash.merge/-/lodash.merge-4.6.2.tgz", + "integrity": "sha512-0KpjqXRVvrYyCsX1swR/XTK0va6VQkQM6MNo7PqW77ByjAhoARA8EfrP1N4+KlKj8YS0ZUCtRT/YUuhyYDujIQ==", + "dev": true + }, "lodash.truncate": { "version": "4.4.2", "resolved": "https://registry.npmjs.org/lodash.truncate/-/lodash.truncate-4.4.2.tgz", @@ -4679,16 +4676,16 @@ } }, "mime-db": { - "version": "1.47.0", - "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.47.0.tgz", - "integrity": "sha512-QBmA/G2y+IfeS4oktet3qRZ+P5kPhCKRXxXnQEudYqUaEioAU1/Lq2us3D/t1Jfo4hE9REQPrbB7K5sOczJVIw==" + "version": "1.48.0", + "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.48.0.tgz", + "integrity": 
"sha512-FM3QwxV+TnZYQ2aRqhlKBMHxk10lTbMt3bBkMAp54ddrNeVSfcQYOOKuGuy3Ddrm38I04If834fOUSq1yzslJQ==" }, "mime-types": { - "version": "2.1.30", - "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.30.tgz", - "integrity": "sha512-crmjA4bLtR8m9qLpHvgxSChT+XoSlZi8J4n/aIdn3z92e/U47Z0V/yl+Wh9W046GgFVAmoNR/fmdbZYcSSIUeg==", + "version": "2.1.31", + "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.31.tgz", + "integrity": "sha512-XGZnNzm3QvgKxa8dpzyhFTHmpP3l5YNusmne07VUOXxou9CqUqYa/HBy124RqtVh/O2pECas/MOcsDgpilPOPg==", "requires": { - "mime-db": "1.47.0" + "mime-db": "1.48.0" } }, "minimatch": { @@ -4920,9 +4917,9 @@ } }, "regexpp": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/regexpp/-/regexpp-3.1.0.tgz", - "integrity": "sha512-ZOIzd8yVsQQA7j8GCSlPGXwg5PfmA1mrq0JP4nGhh54LaKN3xdai/vHUDu74pKwV8OxseMS65u2NImosQcSD0Q==", + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/regexpp/-/regexpp-3.2.0.tgz", + "integrity": "sha512-pq2bWo9mVD43nbts2wGv17XLiNLya+GklZ8kaDLV2Z08gDCsGpnKn9BFMepvWuHCbyVvY7J5o5+BVvoQbmlJLg==", "dev": true }, "request": { @@ -5055,21 +5052,14 @@ "dev": true }, "ssh2": { - "version": "0.8.9", - "resolved": "https://registry.npmjs.org/ssh2/-/ssh2-0.8.9.tgz", - "integrity": "sha512-GmoNPxWDMkVpMFa9LVVzQZHF6EW3WKmBwL+4/GeILf2hFmix5Isxm7Amamo8o7bHiU0tC+wXsGcUXOxp8ChPaw==", + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/ssh2/-/ssh2-1.1.0.tgz", + "integrity": "sha512-CidQLG2ZacoT0Z7O6dOyisj4JdrOrLVJ4KbHjVNz9yI1vO08FAYQPcnkXY9BP8zeYo+J/nBgY6Gg4R7w4WFWtg==", "requires": { - "ssh2-streams": "~0.4.10" - } - }, - "ssh2-streams": { - "version": "0.4.10", - "resolved": "https://registry.npmjs.org/ssh2-streams/-/ssh2-streams-0.4.10.tgz", - "integrity": "sha512-8pnlMjvnIZJvmTzUIIA5nT4jr2ZWNNVHwyXfMGdRJbug9TpI3kd99ffglgfSWqujVv/0gxwMsDn9j9RVst8yhQ==", - "requires": { - "asn1": "~0.2.0", + "asn1": "^0.2.4", "bcrypt-pbkdf": "^1.0.2", - "streamsearch": "~0.1.2" + "cpu-features": "0.0.2", + "nan": 
"^2.14.2" } }, "sshpk": { @@ -5093,11 +5083,6 @@ "resolved": "https://registry.npmjs.org/stack-trace/-/stack-trace-0.0.10.tgz", "integrity": "sha1-VHxws0fo0ytOEI6hoqFZ5f3eGcA=" }, - "streamsearch": { - "version": "0.1.2", - "resolved": "https://registry.npmjs.org/streamsearch/-/streamsearch-0.1.2.tgz", - "integrity": "sha1-gIudDlb8Jz2Am6VzOOkpkZoanxo=" - }, "string_decoder": { "version": "1.3.0", "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.3.0.tgz", @@ -5140,9 +5125,9 @@ } }, "table": { - "version": "6.7.0", - "resolved": "https://registry.npmjs.org/table/-/table-6.7.0.tgz", - "integrity": "sha512-SAM+5p6V99gYiiy2gT5ArdzgM1dLDed0nkrWmG6Fry/bUS/m9x83BwpJUOf1Qj/x2qJd+thL6IkIx7qPGRxqBw==", + "version": "6.7.1", + "resolved": "https://registry.npmjs.org/table/-/table-6.7.1.tgz", + "integrity": "sha512-ZGum47Yi6KOOFDE8m223td53ath2enHcYLgOCjGr5ngu8bdIARQk6mN/wRMv4yMRcHnCSnHbCEha4sobQx5yWg==", "dev": true, "requires": { "ajv": "^8.0.1", @@ -5154,9 +5139,9 @@ }, "dependencies": { "ajv": { - "version": "8.3.0", - "resolved": "https://registry.npmjs.org/ajv/-/ajv-8.3.0.tgz", - "integrity": "sha512-RYE7B5An83d7eWnDR8kbdaIFqmKCNsP16ay1hDbJEU+sa0e3H9SebskCt0Uufem6cfAVu7Col6ubcn/W+Sm8/Q==", + "version": "8.6.0", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-8.6.0.tgz", + "integrity": "sha512-cnUG4NSBiM4YFBxgZIj/In3/6KX+rQ2l2YPRVcvAMQGWEPKuXoPIhxzwqh31jA3IPbI4qEOp/5ILI4ynioXsGQ==", "dev": true, "requires": { "fast-deep-equal": "^3.1.1", @@ -5227,9 +5212,9 @@ "dev": true }, "uglify-js": { - "version": "3.13.6", - "resolved": "https://registry.npmjs.org/uglify-js/-/uglify-js-3.13.6.tgz", - "integrity": "sha512-rRprLwl8RVaS+Qvx3Wh5hPfPBn9++G6xkGlUupya0s5aDmNjI7z3lnRLB3u7sN4OmbB0pWgzhM9BEJyiWAwtAA==", + "version": "3.13.9", + "resolved": "https://registry.npmjs.org/uglify-js/-/uglify-js-3.13.9.tgz", + "integrity": "sha512-wZbyTQ1w6Y7fHdt8sJnHfSIuWeDgk6B5rCb4E/AM6QNNPbOMIZph21PW5dRB3h7Df0GszN+t7RuUH6sWK5bF0g==", "optional": true }, "uri-js": { @@ 
-5386,9 +5371,9 @@ } }, "yargs-parser": { - "version": "20.2.7", - "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-20.2.7.tgz", - "integrity": "sha512-FiNkvbeHzB/syOjIUxFDCnhSfzAL8R5vs40MgLFBorXACCOAEaWu0gRZl14vG8MR9AOJIZbmkjhusqBYZ3HTHw==" + "version": "20.2.9", + "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-20.2.9.tgz", + "integrity": "sha512-y11nGElTIV+CT3Zv9t7VKl+Q3hTQoT9a1Qzezhhl6Rp21gJ/IVTW7Z3y9EWXhuUBC2Shnf+DX0antecpAwSP8w==" } } } diff --git a/package.json b/package.json index e559bdb..b37457b 100644 --- a/package.json +++ b/package.json @@ -26,7 +26,7 @@ "lru-cache": "^6.0.0", "request": "^2.88.2", "semver": "^7.3.4", - "ssh2": "^0.8.9", + "ssh2": "^1.1.0", "uri-js": "^4.4.1", "uuid": "^8.3.2", "winston": "^3.3.3", From 498277408a9613d70617869367415494af2aee90 Mon Sep 17 00:00:00 2001 From: Travis Glenn Hansen Date: Thu, 24 Jun 2021 17:04:38 -0600 Subject: [PATCH 12/44] lustre-client support Signed-off-by: Travis Glenn Hansen --- examples/lustre-client.yaml | 10 +++++++ src/driver/controller-lustre-client/index.js | 31 ++++++++++++++++++++ src/driver/factory.js | 3 ++ src/driver/index.js | 2 ++ src/driver/node-manual/index.js | 3 ++ 5 files changed, 49 insertions(+) create mode 100644 examples/lustre-client.yaml create mode 100644 src/driver/controller-lustre-client/index.js diff --git a/examples/lustre-client.yaml b/examples/lustre-client.yaml new file mode 100644 index 0000000..7a331b4 --- /dev/null +++ b/examples/lustre-client.yaml @@ -0,0 +1,10 @@ +driver: lustre-client +instance_id: +lustre: + shareHost: server address + shareBasePath: "/some/path" + # shareHost:shareBasePath should be mounted at this location in the controller container + controllerBasePath: "/storage" + dirPermissionsMode: "0777" + dirPermissionsUser: root + dirPermissionsGroup: wheel diff --git a/src/driver/controller-lustre-client/index.js b/src/driver/controller-lustre-client/index.js new file mode 100644 index 0000000..18914a8 --- 
/dev/null +++ b/src/driver/controller-lustre-client/index.js @@ -0,0 +1,31 @@ +const { ControllerClientCommonDriver } = require("../controller-client-common"); + +/** + * Crude lustre-client driver which simply creates directories to be mounted + * and uses rsync for cloning/snapshots + */ +class ControllerLustreClientDriver extends ControllerClientCommonDriver { + constructor(ctx, options) { + super(...arguments); + } + + getConfigKey() { + return "lustre"; + } + + getVolumeContext(name) { + const driver = this; + const config_key = driver.getConfigKey(); + return { + node_attach_driver: "lustre", + server: this.options[config_key].shareHost, + share: driver.getShareVolumePath(name), + }; + } + + getFsTypes() { + return ["lustre"]; + } +} + +module.exports.ControllerLustreClientDriver = ControllerLustreClientDriver; diff --git a/src/driver/factory.js b/src/driver/factory.js index f10f9b5..413c2df 100644 --- a/src/driver/factory.js +++ b/src/driver/factory.js @@ -7,6 +7,7 @@ const { const { ControllerNfsClientDriver } = require("./controller-nfs-client"); const { ControllerSmbClientDriver } = require("./controller-smb-client"); +const { ControllerLustreClientDriver } = require("./controller-lustre-client"); const { ControllerSynologyDriver } = require("./controller-synology"); const { NodeManualDriver } = require("./node-manual"); @@ -36,6 +37,8 @@ function factory(ctx, options) { return new ControllerSmbClientDriver(ctx, options); case "nfs-client": return new ControllerNfsClientDriver(ctx, options); + case "lustre-client": + return new ControllerLustreClientDriver(ctx, options); case "node-manual": return new NodeManualDriver(ctx, options); default: diff --git a/src/driver/index.js b/src/driver/index.js index 74852c4..89c9df9 100644 --- a/src/driver/index.js +++ b/src/driver/index.js @@ -316,6 +316,7 @@ class CsiBaseDriver { switch (node_attach_driver) { case "nfs": + case "lustre": device = `${volume_context.server}:${volume_context.share}`; break; case "smb": 
@@ -814,6 +815,7 @@ class CsiBaseDriver { switch (node_attach_driver) { case "nfs": case "smb": + case "lustre": case "iscsi": // ensure appropriate directories/files switch (access_type) { diff --git a/src/driver/node-manual/index.js b/src/driver/node-manual/index.js index 60920a2..d2de424 100644 --- a/src/driver/node-manual/index.js +++ b/src/driver/node-manual/index.js @@ -87,6 +87,9 @@ class NodeManualDriver extends CsiBaseDriver { case "smb": driverResourceType = "filesystem"; fs_types = ["cifs"]; + case "lustre": + driverResourceType = "filesystem"; + fs_types = ["lustre"]; break; case "iscsi": driverResourceType = "volume"; From da94282a5c162f09726ebdc12c7853dd1c98e3cf Mon Sep 17 00:00:00 2001 From: Hunter Madsen Date: Thu, 24 Jun 2021 23:19:27 -0600 Subject: [PATCH 13/44] Synology ISCSI --- src/driver/controller-synology/http/index.js | 302 +++++++++++++++++++ src/driver/controller-synology/index.js | 209 ++++++++++++- 2 files changed, 498 insertions(+), 13 deletions(-) create mode 100644 src/driver/controller-synology/http/index.js diff --git a/src/driver/controller-synology/http/index.js b/src/driver/controller-synology/http/index.js new file mode 100644 index 0000000..9a2d306 --- /dev/null +++ b/src/driver/controller-synology/http/index.js @@ -0,0 +1,302 @@ +const request = require("request"); + +const USER_AGENT = "democratic-csi"; + +class SynologyHttpClient { + constructor(options = {}) { + this.options = JSON.parse(JSON.stringify(options)); + this.logger = console; + + setInterval(() => { + console.log("WIPING OUT SYNOLOGY SID"); + this.sid = null; + }, 60 * 1000); + } + + async login() { + if (!this.sid) { + const data = { + api: "SYNO.API.Auth", + version: "2", + method: "login", + account: this.options.username, + passwd: this.options.password, + session: this.options.session, + format: "sid", + }; + + this.authenticating = true; + let response = await this.do_request("GET", "auth.cgi", data); + this.sid = response.body.data.sid; + 
this.authenticating = false; + } + } + + log_response(error, response, body, options) { + this.logger.debug("SYNOLOGY HTTP REQUEST: " + JSON.stringify(options)); + this.logger.debug("SYNOLOGY HTTP ERROR: " + error); + this.logger.debug("SYNOLOGY HTTP STATUS: " + response.statusCode); + this.logger.debug( + "SYNOLOGY HTTP HEADERS: " + JSON.stringify(response.headers) + ); + this.logger.debug("SYNOLOGY HTTP BODY: " + JSON.stringify(body)); + } + + async do_request(method, path, data = {}) { + const client = this; + if (!this.authenticating) { + await this.login(); + } + + return new Promise((resolve, reject) => { + if (data.api != "SYNO.API.Auth") { + data._sid = this.sid; + } + + const options = { + method: method, + url: `${this.options.protocol}://${this.options.host}:${this.options.port}/webapi/${path}`, + headers: { + Accept: "application/json", + "User-Agent": USER_AGENT, + "Content-Type": "application/json", + }, + json: true, + agentOptions: { + rejectUnauthorized: !!!client.options.allowInsecure, + }, + }; + + switch (method) { + case "GET": + options.qs = data; + break; + default: + options.body = data; + break; + } + + request(options, function (error, response, body) { + client.log_response(...arguments, options); + + if (error) { + reject(error); + } + + if (response.statusCode > 299 || response.statusCode < 200) { + reject(response); + } + + if (response.body.success === false) { + reject(response); + } + + resolve(response); + }); + }); + } + + async GetLunUUIDByName(name) { + const lun_list = { + api: "SYNO.Core.ISCSI.LUN", + version: "1", + method: "list", + }; + + let response = await this.do_request("GET", "entry.cgi", lun_list); + let lun = response.body.data.luns.find((i) => { + return i.name == name; + }); + + if (lun) { + return lun.uuid; + } + } + + async GetTargetByTargetID(target_id) { + let targets = await this.ListTargets(); + let target = targets.find((i) => { + return i.target_id == target_id; + }); + + return target; + } + + async 
ListTargets() { + const iscsi_target_list = { + api: "SYNO.Core.ISCSI.Target", + version: "1", + path: "entry.cgi", + method: "list", + additional: '["mapped_lun", "status", "acls", "connected_sessions"]', + }; + let response = await this.do_request("GET", "entry.cgi", iscsi_target_list); + return response.body.data.targets; + } + + async CreateLun(data = {}) { + let response; + let iscsi_lun_create = Object.assign(data, { + api: "SYNO.Core.ISCSI.LUN", + version: "1", + method: "create", + }); + + const lun_list = { + api: "SYNO.Core.ISCSI.LUN", + version: "1", + method: "list", + }; + + try { + response = await this.do_request("GET", "entry.cgi", iscsi_lun_create); + return response.body.data.uuid; + } catch (err) { + if ([18990538].includes(err.body.error.code)) { + response = await this.do_request("GET", "entry.cgi", lun_list); + let lun = response.body.data.luns.find((i) => { + return i.name == iscsi_lun_create.name; + }); + return lun.uuid; + } else { + throw err; + } + } + } + + async MapLun(data = {}) { + // this is mapping from the perspective of the lun + let iscsi_target_map = Object.assign(data, { + api: "SYNO.Core.ISCSI.LUN", + method: "map_target", + version: "1", + }); + iscsi_target_map.target_ids = JSON.stringify(iscsi_target_map.target_ids); + + // this is mapping from the perspective of the target + /* + iscsi_target_map = Object.assign(data, { + api: "SYNO.Core.ISCSI.Target", + method: "map_lun", + version: "1", + }); + iscsi_target_map.lun_uuids = JSON.stringify(iscsi_target_map.lun_uuids); + */ + + await this.do_request("GET", "entry.cgi", iscsi_target_map); + } + + async DeleteLun(uuid) { + let iscsi_lun_delete = { + api: "SYNO.Core.ISCSI.LUN", + method: "delete", + version: 1, + uuid: uuid || "", + }; + try { + await this.do_request("GET", "entry.cgi", iscsi_lun_delete); + } catch (err) { + if (![18990505].includes(err.body.error.code)) { + throw err; + } + } + } + + async GetTargetIDByIQN(iqn) { + const iscsi_target_list = { + api: 
"SYNO.Core.ISCSI.Target", + version: "1", + path: "entry.cgi", + method: "list", + additional: '["mapped_lun", "status", "acls", "connected_sessions"]', + }; + + let response = await this.do_request("GET", "entry.cgi", iscsi_target_list); + let target = response.body.data.targets.find((i) => { + return i.iqn == iqn; + }); + + if (target) { + return target.target_id; + } + } + + async CreateTarget(data = {}) { + let iscsi_target_create = Object.assign(data, { + api: "SYNO.Core.ISCSI.Target", + version: "1", + method: "create", + }); + + let response; + + try { + response = await this.do_request("GET", "entry.cgi", iscsi_target_create); + + return response.body.data.target_id; + } catch (err) { + if ([18990744].includes(err.body.error.code)) { + //do lookup + const iscsi_target_list = { + api: "SYNO.Core.ISCSI.Target", + version: "1", + path: "entry.cgi", + method: "list", + additional: '["mapped_lun", "status", "acls", "connected_sessions"]', + }; + + response = await this.do_request("GET", "entry.cgi", iscsi_target_list); + let target = response.body.data.targets.find((i) => { + return i.iqn == iscsi_target_create.iqn; + }); + + let target_id = target.target_id; + return target_id; + } else { + throw err; + } + } + } + + async DeleteTarget(target_id) { + const iscsi_target_delete = { + api: "SYNO.Core.ISCSI.Target", + method: "delete", + version: "1", + path: "entry.cgi", + }; + + try { + await this.do_request( + "GET", + "entry.cgi", + Object.assign(iscsi_target_delete, { + target_id: JSON.stringify(String(target_id || "")), + }) + ); + } catch (err) { + /** + * 18990710 = non-existant + */ + if (![18990710].includes(err.body.error.code)) { + throw err; + } + } + } + + async ExpandISCSILun(uuid, size) { + const iscsi_lun_extend = { + api: "SYNO.Core.ISCSI.LUN", + method: "set", + version: 1, + }; + + await this.do_request( + "GET", + "entry.cgi", + Object.assign(iscsi_lun_extend, { uuid: uuid, new_size: size }) + ); + } +} + +module.exports.SynologyHttpClient = 
SynologyHttpClient; diff --git a/src/driver/controller-synology/index.js b/src/driver/controller-synology/index.js index 143dd4e..b01e280 100644 --- a/src/driver/controller-synology/index.js +++ b/src/driver/controller-synology/index.js @@ -1,5 +1,6 @@ const { CsiBaseDriver } = require("../index"); const { GrpcError, grpc } = require("../../utils/grpc"); +const SynologyHttpClient = require("./http").SynologyHttpClient; /** * @@ -57,7 +58,7 @@ class ControllerSynologyDriver extends CsiBaseDriver { //"LIST_SNAPSHOTS", //"CLONE_VOLUME", //"PUBLISH_READONLY", - //"EXPAND_VOLUME", + "EXPAND_VOLUME", ]; } @@ -73,6 +74,13 @@ class ControllerSynologyDriver extends CsiBaseDriver { } } + async getHttpClient() { + if (!this.httpClient) { + this.httpClient = new SynologyHttpClient(this.options.httpConnection); + } + return this.httpClient; + } + getDriverResourceType() { switch (this.options.driver) { case "synology-nfs": @@ -98,6 +106,19 @@ class ControllerSynologyDriver extends CsiBaseDriver { } } + buildIscsiName(name) { + let iscsiName = name; + if (this.options.iscsi.namePrefix) { + iscsiName = this.options.iscsi.namePrefix + iscsiName; + } + + if (this.options.iscsi.nameSuffix) { + iscsiName += this.options.iscsi.nameSuffix; + } + + return iscsiName.toLowerCase(); + } + assertCapabilities(capabilities) { const driverResourceType = this.getDriverResourceType(); this.ctx.logger.verbose("validating capabilities: %j", capabilities); @@ -176,6 +197,7 @@ class ControllerSynologyDriver extends CsiBaseDriver { */ async CreateVolume(call) { const driver = this; + const httpClient = await driver.getHttpClient(); let name = call.request.name; let volume_content_source = call.request.volume_content_source; @@ -230,23 +252,84 @@ class ControllerSynologyDriver extends CsiBaseDriver { ); } + let volume_context = {}; switch (driver.getDriverShareType()) { case "nfs": // TODO: create volume here + throw new GrpcError( + grpc.status.UNIMPLEMENTED, + `operation not supported by driver` + 
); break; case "smb": // TODO: create volume here + throw new GrpcError( + grpc.status.UNIMPLEMENTED, + `operation not supported by driver` + ); break; case "iscsi": - // TODO: create volume here + let iscsiName = driver.buildIscsiName(name); + let data; + let iqn = driver.options.iscsi.baseiqn + iscsiName; + data = Object.assign(driver.options.iscsi.targetAttributes, { + name: iscsiName, + iqn, + }); + + let target_id = await httpClient.CreateTarget(data); + data = Object.assign(driver.options.iscsi.lunAttributes, { + name: iscsiName, + location: driver.options.synology.location, + size: capacity_bytes, + }); + let lun_uuid = await httpClient.CreateLun(data); + let target = await httpClient.GetTargetByTargetID(target_id); + + if (!target) { + throw new GrpcError( + grpc.status.UNKNOWN, + `failed to lookup target: ${target_id}` + ); + } + + if ( + !target.mapped_luns.some((lun) => { + return lun.lun_uuid == lun_uuid; + }) + ) { + data = { + uuid: lun_uuid, + target_ids: [target_id], + }; + /* + data = { + lun_uuids: [lun_uuid], + target_id: target_id, + }; + */ + await httpClient.MapLun(data); + } + + volume_context = { + node_attach_driver: "iscsi", + portal: driver.options.iscsi.targetPortal || "", + portals: driver.options.iscsi.targetPortals + ? 
driver.options.iscsi.targetPortals.join(",") + : "", + interface: driver.options.iscsi.interface || "", + iqn, + lun: 0, + }; break; default: - // throw an error + throw new GrpcError( + grpc.status.UNIMPLEMENTED, + `operation not supported by driver` + ); break; } - let volume_context = driver.getVolumeContext(name); - volume_context["provisioner_driver"] = driver.options.driver; if (driver.options.instance_id) { volume_context["provisioner_driver_instance_id"] = @@ -256,8 +339,7 @@ class ControllerSynologyDriver extends CsiBaseDriver { const res = { volume: { volume_id: name, - //capacity_bytes: capacity_bytes, // kubernetes currently pukes if capacity is returned as 0 - capacity_bytes: 0, + capacity_bytes, // kubernetes currently pukes if capacity is returned as 0 content_source: volume_content_source, volume_context, }, @@ -273,6 +355,7 @@ class ControllerSynologyDriver extends CsiBaseDriver { */ async DeleteVolume(call) { const driver = this; + const httpClient = await driver.getHttpClient(); let name = call.request.volume_id; @@ -283,18 +366,38 @@ class ControllerSynologyDriver extends CsiBaseDriver { ); } + let response; + switch (driver.getDriverShareType()) { case "nfs": // TODO: delete volume here + throw new GrpcError( + grpc.status.UNIMPLEMENTED, + `operation not supported by driver` + ); break; case "smb": // TODO: delete volume here + throw new GrpcError( + grpc.status.UNIMPLEMENTED, + `operation not supported by driver` + ); break; case "iscsi": - // TODO: delete volume here + let iscsiName = driver.buildIscsiName(name); + let iqn = driver.options.iscsi.baseiqn + iscsiName; + + response = await httpClient.GetLunUUIDByName(iscsiName); + await httpClient.DeleteLun(response); + + response = await httpClient.GetTargetIDByIQN(iqn); + await httpClient.DeleteTarget(response); break; default: - // throw an error + throw new GrpcError( + grpc.status.UNIMPLEMENTED, + `operation not supported by driver` + ); break; } @@ -306,10 +409,90 @@ class 
ControllerSynologyDriver extends CsiBaseDriver { * @param {*} call */ async ControllerExpandVolume(call) { - throw new GrpcError( - grpc.status.UNIMPLEMENTED, - `operation not supported by driver` - ); + const driver = this; + const httpClient = await driver.getHttpClient(); + + let name = call.request.volume_id; + + if (!name) { + throw new GrpcError( + grpc.status.INVALID_ARGUMENT, + `volume_id is required` + ); + } + + let capacity_bytes = + call.request.capacity_range.required_bytes || + call.request.capacity_range.limit_bytes; + if (!capacity_bytes) { + //should never happen, value must be set + throw new GrpcError( + grpc.status.INVALID_ARGUMENT, + `volume capacity is required (either required_bytes or limit_bytes)` + ); + } + + if ( + call.request.capacity_range.required_bytes > 0 && + call.request.capacity_range.limit_bytes > 0 && + call.request.capacity_range.required_bytes > + call.request.capacity_range.limit_bytes + ) { + throw new GrpcError( + grpc.status.INVALID_ARGUMENT, + `required_bytes is greather than limit_bytes` + ); + } + + // ensure *actual* capacity is not greater than limit + if ( + call.request.capacity_range.limit_bytes && + call.request.capacity_range.limit_bytes > 0 && + capacity_bytes > call.request.capacity_range.limit_bytes + ) { + throw new GrpcError( + grpc.status.OUT_OF_RANGE, + `required volume capacity is greater than limit` + ); + } + + let node_expansion_required = false; + let response; + + switch (driver.getDriverShareType()) { + case "nfs": + // TODO: expand volume here + throw new GrpcError( + grpc.status.UNIMPLEMENTED, + `operation not supported by driver` + ); + break; + case "smb": + // TODO: expand volume here + throw new GrpcError( + grpc.status.UNIMPLEMENTED, + `operation not supported by driver` + ); + break; + case "iscsi": + node_expansion_required = true; + let iscsiName = driver.buildIscsiName(name); + + response = await httpClient.GetLunUUIDByName(iscsiName); + await httpClient.ExpandISCSILun(response, 
capacity_bytes); + break; + default: + throw new GrpcError( + grpc.status.UNIMPLEMENTED, + `operation not supported by driver` + ); + break; + } + + return { + capacity_bytes, + node_expansion_required, + }; } /** From 86f7d16aa32e6e3c68ce01a4233c21ce444f1dd7 Mon Sep 17 00:00:00 2001 From: Travis Glenn Hansen Date: Fri, 25 Jun 2021 00:33:27 -0600 Subject: [PATCH 14/44] more robust lun id logic Signed-off-by: Travis Glenn Hansen --- src/driver/controller-synology/index.js | 30 +++++++++++++++++++------ 1 file changed, 23 insertions(+), 7 deletions(-) diff --git a/src/driver/controller-synology/index.js b/src/driver/controller-synology/index.js index b01e280..d0a9d6c 100644 --- a/src/driver/controller-synology/index.js +++ b/src/driver/controller-synology/index.js @@ -271,6 +271,9 @@ class ControllerSynologyDriver extends CsiBaseDriver { case "iscsi": let iscsiName = driver.buildIscsiName(name); let data; + let target; + let lun_mapping; + let iqn = driver.options.iscsi.baseiqn + iscsiName; data = Object.assign(driver.options.iscsi.targetAttributes, { name: iscsiName, @@ -284,7 +287,7 @@ class ControllerSynologyDriver extends CsiBaseDriver { size: capacity_bytes, }); let lun_uuid = await httpClient.CreateLun(data); - let target = await httpClient.GetTargetByTargetID(target_id); + target = await httpClient.GetTargetByTargetID(target_id); if (!target) { throw new GrpcError( @@ -293,11 +296,11 @@ class ControllerSynologyDriver extends CsiBaseDriver { ); } - if ( - !target.mapped_luns.some((lun) => { - return lun.lun_uuid == lun_uuid; - }) - ) { + lun_mapping = target.mapped_luns.find((lun) => { + return lun.lun_uuid == lun_uuid; + }); + + if (!lun_mapping) { data = { uuid: lun_uuid, target_ids: [target_id], @@ -309,6 +312,19 @@ class ControllerSynologyDriver extends CsiBaseDriver { }; */ await httpClient.MapLun(data); + + // re-retrieve target to ensure proper lun (mapping_index) value is returned + target = await httpClient.GetTargetByTargetID(target_id); + lun_mapping 
= target.mapped_luns.find((lun) => { + return lun.lun_uuid == lun_uuid; + }); + } + + if (!lun_mapping) { + throw new GrpcError( + grpc.status.UNKNOWN, + `failed to lookup lun_mapping_id` + ); } volume_context = { @@ -319,7 +335,7 @@ class ControllerSynologyDriver extends CsiBaseDriver { : "", interface: driver.options.iscsi.interface || "", iqn, - lun: 0, + lun: lun_mapping.mapping_index, }; break; default: From 30941409f4b0ab07416ce996bc734c1877ce6b6b Mon Sep 17 00:00:00 2001 From: Travis Glenn Hansen Date: Fri, 25 Jun 2021 08:53:07 -0600 Subject: [PATCH 15/44] better synology sid handling Signed-off-by: Travis Glenn Hansen --- package-lock.json | 101 ++++++++++++------- package.json | 1 + src/driver/controller-synology/http/index.js | 30 ++++-- 3 files changed, 88 insertions(+), 44 deletions(-) diff --git a/package-lock.json b/package-lock.json index 6363d4d..df139e6 100644 --- a/package-lock.json +++ b/package-lock.json @@ -9,6 +9,7 @@ "license": "MIT", "dependencies": { "@grpc/proto-loader": "^0.6.0", + "async-mutex": "^0.3.1", "bunyan": "^1.8.15", "grpc-uds": "^0.1.6", "handlebars": "^4.7.7", @@ -182,9 +183,9 @@ } }, "node_modules/@grpc/proto-loader": { - "version": "0.6.2", - "resolved": "https://registry.npmjs.org/@grpc/proto-loader/-/proto-loader-0.6.2.tgz", - "integrity": "sha512-q2Qle60Ht2OQBCp9S5hv1JbI4uBBq6/mqSevFNK3ZEgRDBCAkWqZPUhD/K9gXOHrHKluliHiVq2L9sw1mVyAIg==", + "version": "0.6.3", + "resolved": "https://registry.npmjs.org/@grpc/proto-loader/-/proto-loader-0.6.3.tgz", + "integrity": "sha512-AtMWwb7kY8DdtwIQh2hC4YFM1MzZ22lMA+gjbnCYDgICt14vX2tCa59bDrEjFyOI4LvORjpvT/UhHUdKvsX8og==", "dependencies": { "@types/long": "^4.0.1", "lodash.camelcase": "^4.3.0", @@ -391,6 +392,14 @@ "resolved": "https://registry.npmjs.org/async/-/async-3.2.0.tgz", "integrity": "sha512-TR2mEZFVOj2pLStYxLht7TyfuRzaydfpxr3k9RpHIzMgw7A64dzsdqCxH1WJyQdoe8T10nDXd9wnEigmiuHIZw==" }, + "node_modules/async-mutex": { + "version": "0.3.1", + "resolved": 
"https://registry.npmjs.org/async-mutex/-/async-mutex-0.3.1.tgz", + "integrity": "sha512-vRfQwcqBnJTLzVQo72Sf7KIUbcSUP5hNchx6udI1U6LuPQpfePgdjJzlCe76yFZ8pxlLjn9lwcl/Ya0TSOv0Tw==", + "dependencies": { + "tslib": "^2.1.0" + } + }, "node_modules/asynckit": { "version": "0.4.0", "resolved": "https://registry.npmjs.org/asynckit/-/asynckit-0.4.0.tgz", @@ -412,8 +421,7 @@ "node_modules/balanced-match": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz", - "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==", - "devOptional": true + "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==" }, "node_modules/bcrypt-pbkdf": { "version": "1.0.2", @@ -427,7 +435,6 @@ "version": "1.1.11", "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", - "devOptional": true, "dependencies": { "balanced-match": "^1.0.0", "concat-map": "0.0.1" @@ -517,6 +524,14 @@ "wrap-ansi": "^7.0.0" } }, + "node_modules/code-point-at": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/code-point-at/-/code-point-at-1.1.0.tgz", + "integrity": "sha1-DQcLTQQ6W+ozovGkDi7bPZpMz3c=", + "engines": { + "node": ">=0.10.0" + } + }, "node_modules/color": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/color/-/color-3.0.0.tgz", @@ -603,8 +618,7 @@ "node_modules/concat-map": { "version": "0.0.1", "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz", - "integrity": "sha1-2Klr13/Wjfd5OnMDajug1UBdR3s=", - "devOptional": true + "integrity": "sha1-2Klr13/Wjfd5OnMDajug1UBdR3s=" }, "node_modules/core-util-is": { "version": "1.0.2", @@ -1083,8 +1097,7 @@ "node_modules/fs.realpath": { "version": "1.0.0", "resolved": 
"https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz", - "integrity": "sha1-FQStJSMVjKpA20onh8sBQRmU6k8=", - "dev": true + "integrity": "sha1-FQStJSMVjKpA20onh8sBQRmU6k8=" }, "node_modules/functional-red-black-tree": { "version": "1.0.1", @@ -1112,7 +1125,6 @@ "version": "7.1.7", "resolved": "https://registry.npmjs.org/glob/-/glob-7.1.7.tgz", "integrity": "sha512-OvD9ENzPLbegENnYP5UUfJIirTg4+XwMWGaQfQTY0JenxNvvIKP3U3/tAQSPIu/lHxXYSZmpXlUHeqAIdKzBLQ==", - "dev": true, "dependencies": { "fs.realpath": "^1.0.0", "inflight": "^1.0.4", @@ -1886,7 +1898,6 @@ "version": "1.0.6", "resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz", "integrity": "sha1-Sb1jMdfQLQwJvJEKEHW6gWW1bfk=", - "devOptional": true, "dependencies": { "once": "^1.3.0", "wrappy": "1" @@ -2133,7 +2144,6 @@ "version": "3.0.4", "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.0.4.tgz", "integrity": "sha512-yJHVQEhyqPLUTgt9B83PXu6W3rx4MvvHvSUvToogpwoGDOUQ+yDrR0HRot+yOCdCO7u4hX3pWft6kWBBcqh0UA==", - "devOptional": true, "dependencies": { "brace-expansion": "^1.1.7" }, @@ -2239,6 +2249,14 @@ "resolved": "https://registry.npmjs.org/neo-async/-/neo-async-2.6.2.tgz", "integrity": "sha512-Yd3UES5mWCSqR+qNT93S3UoYUkqAZ9lLg8a7g9rimsWmYGK8cVToA4/sF3RrshdyV3sAGMXVUmpMYOw+dLpOuw==" }, + "node_modules/number-is-nan": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/number-is-nan/-/number-is-nan-1.0.1.tgz", + "integrity": "sha1-CXtgK1NCKlIsGvuHkDGDNpQaAR0=", + "engines": { + "node": ">=0.10.0" + } + }, "node_modules/oauth-sign": { "version": "0.9.0", "resolved": "https://registry.npmjs.org/oauth-sign/-/oauth-sign-0.9.0.tgz", @@ -2251,7 +2269,6 @@ "version": "1.4.0", "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz", "integrity": "sha1-WDsap3WWHUsROsF9nFC6753Xa9E=", - "devOptional": true, "dependencies": { "wrappy": "1" } @@ -2313,7 +2330,6 @@ "version": "1.0.1", "resolved": 
"https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz", "integrity": "sha1-F0uSaHNVNP+8es5r9TpanhtcX18=", - "devOptional": true, "engines": { "node": ">=0.10.0" } @@ -2783,6 +2799,11 @@ "resolved": "https://registry.npmjs.org/triple-beam/-/triple-beam-1.3.0.tgz", "integrity": "sha512-XrHUvV5HpdLmIj4uVMxHggLbFSZYIn7HEWsqePZcI50pco+MPqJ50wMGY794X7AOOhxOBAjbkqfAbEe/QMp2Lw==" }, + "node_modules/tslib": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.3.0.tgz", + "integrity": "sha512-N82ooyxVNm6h1riLCoyS9e3fuJ3AMG2zIZs2Gd1ATcSFjSA23Q0fzjjZeh0jbJvWVDZ0cJT8yaNNaaXHzueNjg==" + }, "node_modules/tunnel-agent": { "version": "0.6.0", "resolved": "https://registry.npmjs.org/tunnel-agent/-/tunnel-agent-0.6.0.tgz", @@ -2992,8 +3013,7 @@ "node_modules/wrappy": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", - "integrity": "sha1-tSQ9jz7BqjXxNkYFvA0QNuMKtp8=", - "devOptional": true + "integrity": "sha1-tSQ9jz7BqjXxNkYFvA0QNuMKtp8=" }, "node_modules/y18n": { "version": "5.0.8", @@ -3168,9 +3188,9 @@ } }, "@grpc/proto-loader": { - "version": "0.6.2", - "resolved": "https://registry.npmjs.org/@grpc/proto-loader/-/proto-loader-0.6.2.tgz", - "integrity": "sha512-q2Qle60Ht2OQBCp9S5hv1JbI4uBBq6/mqSevFNK3ZEgRDBCAkWqZPUhD/K9gXOHrHKluliHiVq2L9sw1mVyAIg==", + "version": "0.6.3", + "resolved": "https://registry.npmjs.org/@grpc/proto-loader/-/proto-loader-0.6.3.tgz", + "integrity": "sha512-AtMWwb7kY8DdtwIQh2hC4YFM1MzZ22lMA+gjbnCYDgICt14vX2tCa59bDrEjFyOI4LvORjpvT/UhHUdKvsX8og==", "requires": { "@types/long": "^4.0.1", "lodash.camelcase": "^4.3.0", @@ -3340,6 +3360,14 @@ "resolved": "https://registry.npmjs.org/async/-/async-3.2.0.tgz", "integrity": "sha512-TR2mEZFVOj2pLStYxLht7TyfuRzaydfpxr3k9RpHIzMgw7A64dzsdqCxH1WJyQdoe8T10nDXd9wnEigmiuHIZw==" }, + "async-mutex": { + "version": "0.3.1", + "resolved": "https://registry.npmjs.org/async-mutex/-/async-mutex-0.3.1.tgz", + "integrity": 
"sha512-vRfQwcqBnJTLzVQo72Sf7KIUbcSUP5hNchx6udI1U6LuPQpfePgdjJzlCe76yFZ8pxlLjn9lwcl/Ya0TSOv0Tw==", + "requires": { + "tslib": "^2.1.0" + } + }, "asynckit": { "version": "0.4.0", "resolved": "https://registry.npmjs.org/asynckit/-/asynckit-0.4.0.tgz", @@ -3358,8 +3386,7 @@ "balanced-match": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz", - "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==", - "devOptional": true + "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==" }, "bcrypt-pbkdf": { "version": "1.0.2", @@ -3373,7 +3400,6 @@ "version": "1.1.11", "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", - "devOptional": true, "requires": { "balanced-match": "^1.0.0", "concat-map": "0.0.1" @@ -3441,6 +3467,10 @@ "wrap-ansi": "^7.0.0" } }, + "code-point-at": { + "version": "https://registry.npmjs.org/code-point-at/-/code-point-at-1.1.0.tgz", + "integrity": "sha1-DQcLTQQ6W+ozovGkDi7bPZpMz3c=" + }, "color": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/color/-/color-3.0.0.tgz", @@ -3517,8 +3547,7 @@ "concat-map": { "version": "0.0.1", "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz", - "integrity": "sha1-2Klr13/Wjfd5OnMDajug1UBdR3s=", - "devOptional": true + "integrity": "sha1-2Klr13/Wjfd5OnMDajug1UBdR3s=" }, "core-util-is": { "version": "1.0.2", @@ -3891,8 +3920,7 @@ "fs.realpath": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz", - "integrity": "sha1-FQStJSMVjKpA20onh8sBQRmU6k8=", - "dev": true + "integrity": "sha1-FQStJSMVjKpA20onh8sBQRmU6k8=" }, "functional-red-black-tree": { "version": "1.0.1", @@ -3917,7 +3945,6 @@ "version": "7.1.7", "resolved": 
"https://registry.npmjs.org/glob/-/glob-7.1.7.tgz", "integrity": "sha512-OvD9ENzPLbegENnYP5UUfJIirTg4+XwMWGaQfQTY0JenxNvvIKP3U3/tAQSPIu/lHxXYSZmpXlUHeqAIdKzBLQ==", - "dev": true, "requires": { "fs.realpath": "^1.0.0", "inflight": "^1.0.4", @@ -4481,7 +4508,6 @@ "version": "1.0.6", "resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz", "integrity": "sha1-Sb1jMdfQLQwJvJEKEHW6gWW1bfk=", - "devOptional": true, "requires": { "once": "^1.3.0", "wrappy": "1" @@ -4692,7 +4718,6 @@ "version": "3.0.4", "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.0.4.tgz", "integrity": "sha512-yJHVQEhyqPLUTgt9B83PXu6W3rx4MvvHvSUvToogpwoGDOUQ+yDrR0HRot+yOCdCO7u4hX3pWft6kWBBcqh0UA==", - "devOptional": true, "requires": { "brace-expansion": "^1.1.7" } @@ -4779,6 +4804,10 @@ "resolved": "https://registry.npmjs.org/neo-async/-/neo-async-2.6.2.tgz", "integrity": "sha512-Yd3UES5mWCSqR+qNT93S3UoYUkqAZ9lLg8a7g9rimsWmYGK8cVToA4/sF3RrshdyV3sAGMXVUmpMYOw+dLpOuw==" }, + "number-is-nan": { + "version": "https://registry.npmjs.org/number-is-nan/-/number-is-nan-1.0.1.tgz", + "integrity": "sha1-CXtgK1NCKlIsGvuHkDGDNpQaAR0=" + }, "oauth-sign": { "version": "0.9.0", "resolved": "https://registry.npmjs.org/oauth-sign/-/oauth-sign-0.9.0.tgz", @@ -4788,7 +4817,6 @@ "version": "1.4.0", "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz", "integrity": "sha1-WDsap3WWHUsROsF9nFC6753Xa9E=", - "devOptional": true, "requires": { "wrappy": "1" } @@ -4840,8 +4868,7 @@ "path-is-absolute": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz", - "integrity": "sha1-F0uSaHNVNP+8es5r9TpanhtcX18=", - "devOptional": true + "integrity": "sha1-F0uSaHNVNP+8es5r9TpanhtcX18=" }, "path-key": { "version": "3.1.1", @@ -5183,6 +5210,11 @@ "resolved": "https://registry.npmjs.org/triple-beam/-/triple-beam-1.3.0.tgz", "integrity": "sha512-XrHUvV5HpdLmIj4uVMxHggLbFSZYIn7HEWsqePZcI50pco+MPqJ50wMGY794X7AOOhxOBAjbkqfAbEe/QMp2Lw==" }, + "tslib": 
{ + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.3.0.tgz", + "integrity": "sha512-N82ooyxVNm6h1riLCoyS9e3fuJ3AMG2zIZs2Gd1ATcSFjSA23Q0fzjjZeh0jbJvWVDZ0cJT8yaNNaaXHzueNjg==" + }, "tunnel-agent": { "version": "0.6.0", "resolved": "https://registry.npmjs.org/tunnel-agent/-/tunnel-agent-0.6.0.tgz", @@ -5343,8 +5375,7 @@ "wrappy": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", - "integrity": "sha1-tSQ9jz7BqjXxNkYFvA0QNuMKtp8=", - "devOptional": true + "integrity": "sha1-tSQ9jz7BqjXxNkYFvA0QNuMKtp8=" }, "y18n": { "version": "5.0.8", diff --git a/package.json b/package.json index b37457b..0027148 100644 --- a/package.json +++ b/package.json @@ -19,6 +19,7 @@ }, "dependencies": { "@grpc/proto-loader": "^0.6.0", + "async-mutex": "^0.3.1", "bunyan": "^1.8.15", "grpc-uds": "^0.1.6", "handlebars": "^4.7.7", diff --git a/src/driver/controller-synology/http/index.js b/src/driver/controller-synology/http/index.js index 9a2d306..fe8895a 100644 --- a/src/driver/controller-synology/http/index.js +++ b/src/driver/controller-synology/http/index.js @@ -1,4 +1,5 @@ const request = require("request"); +const Mutex = require("async-mutex").Mutex; const USER_AGENT = "democratic-csi"; @@ -6,11 +7,14 @@ class SynologyHttpClient { constructor(options = {}) { this.options = JSON.parse(JSON.stringify(options)); this.logger = console; + this.doLoginMutex = new Mutex(); - setInterval(() => { - console.log("WIPING OUT SYNOLOGY SID"); - this.sid = null; - }, 60 * 1000); + if (false) { + setInterval(() => { + console.log("WIPING OUT SYNOLOGY SID"); + this.sid = null; + }, 5 * 1000); + } } async login() { @@ -25,11 +29,11 @@ class SynologyHttpClient { format: "sid", }; - this.authenticating = true; let response = await this.do_request("GET", "auth.cgi", data); this.sid = response.body.data.sid; - this.authenticating = false; } + + return this.sid; } log_response(error, response, body, options) { @@ -44,13 +48,17 @@ class 
SynologyHttpClient { async do_request(method, path, data = {}) { const client = this; - if (!this.authenticating) { - await this.login(); + const isAuth = data.api == "SYNO.API.Auth" && data.method == "login"; + let sid; + if (!isAuth) { + sid = await this.doLoginMutex.runExclusive(async () => { + return await this.login(); + }); } return new Promise((resolve, reject) => { if (data.api != "SYNO.API.Auth") { - data._sid = this.sid; + data._sid = sid; } const options = { @@ -88,6 +96,10 @@ class SynologyHttpClient { } if (response.body.success === false) { + // remove invalid sid + if (response.body.error.code == 119 && sid == client.sid) { + client.sid = null; + } reject(response); } From a13b30ab3d1f3f754e029dce9cdf9971fcca9035 Mon Sep 17 00:00:00 2001 From: Travis Glenn Hansen Date: Fri, 25 Jun 2021 14:17:34 -0600 Subject: [PATCH 16/44] fixup bad Object.assign usage Signed-off-by: Travis Glenn Hansen --- src/driver/controller-synology/http/index.js | 83 ++++++++++++-------- src/driver/controller-synology/index.js | 32 +++++--- 2 files changed, 71 insertions(+), 44 deletions(-) diff --git a/src/driver/controller-synology/http/index.js b/src/driver/controller-synology/http/index.js index fe8895a..59b632c 100644 --- a/src/driver/controller-synology/http/index.js +++ b/src/driver/controller-synology/http/index.js @@ -46,7 +46,7 @@ class SynologyHttpClient { this.logger.debug("SYNOLOGY HTTP BODY: " + JSON.stringify(body)); } - async do_request(method, path, data = {}) { + async do_request(method, path, data = {}, options = {}) { const client = this; const isAuth = data.api == "SYNO.API.Auth" && data.method == "login"; let sid; @@ -56,6 +56,8 @@ class SynologyHttpClient { }); } + const invoke_options = options; + return new Promise((resolve, reject) => { if (data.api != "SYNO.API.Auth") { data._sid = sid; @@ -67,9 +69,11 @@ class SynologyHttpClient { headers: { Accept: "application/json", "User-Agent": USER_AGENT, - "Content-Type": "application/json", + "Content-Type": 
invoke_options.use_form_encoded + ? "application/x-www-form-urlencoded" + : "application/json", }, - json: true, + json: invoke_options.use_form_encoded ? false : true, agentOptions: { rejectUnauthorized: !!!client.options.allowInsecure, }, @@ -80,7 +84,12 @@ class SynologyHttpClient { options.qs = data; break; default: - options.body = data; + if (invoke_options.use_form_encoded) { + //options.body = URLSearchParams(data); + options.form = data; + } else { + options.body = data; + } break; } @@ -91,6 +100,15 @@ class SynologyHttpClient { reject(error); } + if ( + typeof response.body !== "object" && + response.body !== null && + response.headers["content-type"] && + response.headers["content-type"].includes("application/json") + ) { + response.body = JSON.parse(response.body); + } + if (response.statusCode > 299 || response.statusCode < 200) { reject(response); } @@ -134,6 +152,15 @@ class SynologyHttpClient { return target; } + async GetTargetByIQN(iqn) { + let targets = await this.ListTargets(); + let target = targets.find((i) => { + return i.iqn == iqn; + }); + + return target; + } + async ListTargets() { const iscsi_target_list = { api: "SYNO.Core.ISCSI.Target", @@ -148,7 +175,7 @@ class SynologyHttpClient { async CreateLun(data = {}) { let response; - let iscsi_lun_create = Object.assign(data, { + let iscsi_lun_create = Object.assign({}, data, { api: "SYNO.Core.ISCSI.LUN", version: "1", method: "create", @@ -178,7 +205,7 @@ class SynologyHttpClient { async MapLun(data = {}) { // this is mapping from the perspective of the lun - let iscsi_target_map = Object.assign(data, { + let iscsi_target_map = Object.assign({}, data, { api: "SYNO.Core.ISCSI.LUN", method: "map_target", version: "1", @@ -199,42 +226,33 @@ class SynologyHttpClient { } async DeleteLun(uuid) { + uuid = uuid || ""; let iscsi_lun_delete = { api: "SYNO.Core.ISCSI.LUN", method: "delete", version: 1, - uuid: uuid || "", + //uuid: uuid, + uuid: "", + uuids: JSON.stringify([uuid]), + 
//is_soft_feas_ignored: false, + is_soft_feas_ignored: true, }; try { await this.do_request("GET", "entry.cgi", iscsi_lun_delete); } catch (err) { + /** + * 18990710 = already gone + * LUN_BAD_LUN_UUID = 18990505 + * LUN_NO_SUCH_SNAPSHOT = 18990532 + */ if (![18990505].includes(err.body.error.code)) { throw err; } } } - async GetTargetIDByIQN(iqn) { - const iscsi_target_list = { - api: "SYNO.Core.ISCSI.Target", - version: "1", - path: "entry.cgi", - method: "list", - additional: '["mapped_lun", "status", "acls", "connected_sessions"]', - }; - - let response = await this.do_request("GET", "entry.cgi", iscsi_target_list); - let target = response.body.data.targets.find((i) => { - return i.iqn == iqn; - }); - - if (target) { - return target.target_id; - } - } - async CreateTarget(data = {}) { - let iscsi_target_create = Object.assign(data, { + let iscsi_target_create = Object.assign({}, data, { api: "SYNO.Core.ISCSI.Target", version: "1", method: "create", @@ -262,8 +280,11 @@ class SynologyHttpClient { return i.iqn == iscsi_target_create.iqn; }); - let target_id = target.target_id; - return target_id; + if (target) { + return target.target_id; + } else { + throw err; + } } else { throw err; } @@ -282,7 +303,7 @@ class SynologyHttpClient { await this.do_request( "GET", "entry.cgi", - Object.assign(iscsi_target_delete, { + Object.assign({}, iscsi_target_delete, { target_id: JSON.stringify(String(target_id || "")), }) ); @@ -306,7 +327,7 @@ class SynologyHttpClient { await this.do_request( "GET", "entry.cgi", - Object.assign(iscsi_lun_extend, { uuid: uuid, new_size: size }) + Object.assign({}, iscsi_lun_extend, { uuid: uuid, new_size: size }) ); } } diff --git a/src/driver/controller-synology/index.js b/src/driver/controller-synology/index.js index d0a9d6c..889e21b 100644 --- a/src/driver/controller-synology/index.js +++ b/src/driver/controller-synology/index.js @@ -274,32 +274,36 @@ class ControllerSynologyDriver extends CsiBaseDriver { let target; let lun_mapping; - 
let iqn = driver.options.iscsi.baseiqn + iscsiName; - data = Object.assign(driver.options.iscsi.targetAttributes, { - name: iscsiName, - iqn, - }); - - let target_id = await httpClient.CreateTarget(data); - data = Object.assign(driver.options.iscsi.lunAttributes, { + // create lun + data = Object.assign({}, driver.options.iscsi.lunAttributes, { name: iscsiName, location: driver.options.synology.location, size: capacity_bytes, }); let lun_uuid = await httpClient.CreateLun(data); - target = await httpClient.GetTargetByTargetID(target_id); + // create target + let iqn = driver.options.iscsi.baseiqn + iscsiName; + data = Object.assign({}, driver.options.iscsi.targetAttributes, { + name: iscsiName, + iqn, + }); + let target_id = await httpClient.CreateTarget(data); + //target = await httpClient.GetTargetByTargetID(target_id); + target = await httpClient.GetTargetByIQN(iqn); if (!target) { throw new GrpcError( grpc.status.UNKNOWN, - `failed to lookup target: ${target_id}` + `failed to lookup target: ${iqn}` ); } + // check if mapping of lun <-> target already exists lun_mapping = target.mapped_luns.find((lun) => { return lun.lun_uuid == lun_uuid; }); + // create mapping if not present already if (!lun_mapping) { data = { uuid: lun_uuid, @@ -403,11 +407,13 @@ class ControllerSynologyDriver extends CsiBaseDriver { let iscsiName = driver.buildIscsiName(name); let iqn = driver.options.iscsi.baseiqn + iscsiName; + response = await httpClient.GetTargetByIQN(iqn); + if (response) { + await httpClient.DeleteTarget(response.target_id); + } + response = await httpClient.GetLunUUIDByName(iscsiName); await httpClient.DeleteLun(response); - - response = await httpClient.GetTargetIDByIQN(iqn); - await httpClient.DeleteTarget(response); break; default: throw new GrpcError( From c761b426d423fdcddf1f88ab60f695b2035dded7 Mon Sep 17 00:00:00 2001 From: Travis Glenn Hansen Date: Sat, 26 Jun 2021 09:25:11 -0600 Subject: [PATCH 17/44] more aggressive logic for force unmounts on stale mounts 
Signed-off-by: Travis Glenn Hansen --- src/driver/index.js | 42 ++++++++++++++++++++++++++++++++++++------ 1 file changed, 36 insertions(+), 6 deletions(-) diff --git a/src/driver/index.js b/src/driver/index.js index 89c9df9..fe37c43 100644 --- a/src/driver/index.js +++ b/src/driver/index.js @@ -597,7 +597,8 @@ class CsiBaseDriver { const staging_target_path = call.request.staging_target_path; const block_path = staging_target_path + "/block_device"; let normalized_staging_path = staging_target_path; - const umount_args = []; // --force + const umount_args = []; + const umount_force_extra_args = ["--force", "--lazy"]; if (!staging_target_path) { throw new GrpcError( @@ -618,7 +619,14 @@ class CsiBaseDriver { * AND the fs is probably stalled */ if (err.timeout) { - result = false; + driver.ctx.logger.warn( + `detected stale mount, attempting to force unmount: ${normalized_staging_path}` + ); + await mount.umount( + normalized_staging_path, + umount_args.concat(umount_force_extra_args) + ); + result = false; // assume we are *NOT* a block device at this point } else { throw err; } @@ -653,12 +661,13 @@ class CsiBaseDriver { result = await mount.getMountDetails(normalized_staging_path); switch (result.fstype) { case "nfs": + case "nfs4": driver.ctx.logger.warn( `detected stale nfs filesystem, attempting to force unmount: ${normalized_staging_path}` ); result = await mount.umount( normalized_staging_path, - umount_args.concat(["--force", "--lazy"]) + umount_args.concat(umount_force_extra_args) ); break; default: @@ -913,9 +922,29 @@ class CsiBaseDriver { const volume_id = call.request.volume_id; const target_path = call.request.target_path; - const umount_args = []; // --force + const umount_args = []; + const umount_force_extra_args = ["--force", "--lazy"]; + + try { + result = await mount.pathIsMounted(target_path); + } catch (err) { + // running findmnt on non-existant paths return immediately + // the only time this should timeout is on a stale fs + // so if 
timeout is hit we should be near certain it is indeed mounted + if (err.timeout) { + driver.ctx.logger.warn( + `detected stale mount, attempting to force unmount: ${target_path}` + ); + await mount.umount( + target_path, + umount_args.concat(umount_force_extra_args) + ); + result = false; // assume we have fully unmounted + } else { + throw err; + } + } - result = await mount.pathIsMounted(target_path); if (result) { try { result = await mount.umount(target_path, umount_args); @@ -928,12 +957,13 @@ class CsiBaseDriver { result = await mount.getMountDetails(target_path); switch (result.fstype) { case "nfs": + case "nfs4": driver.ctx.logger.warn( `detected stale nfs filesystem, attempting to force unmount: ${target_path}` ); result = await mount.umount( target_path, - umount_args.concat(["--force", "--lazy"]) + umount_args.concat(umount_force_extra_args) ); break; default: From 66a22d718ffb015098046ca7aed7c1f86b09806c Mon Sep 17 00:00:00 2001 From: Hunter Madsen Date: Mon, 26 Jul 2021 20:08:53 -0600 Subject: [PATCH 18/44] synology snapshots, getcapacity, misc --- src/driver/controller-synology/http/index.js | 149 +++++++++++++++++-- src/driver/controller-synology/index.js | 119 ++++++++++++--- 2 files changed, 230 insertions(+), 38 deletions(-) diff --git a/src/driver/controller-synology/http/index.js b/src/driver/controller-synology/http/index.js index 59b632c..53e9496 100644 --- a/src/driver/controller-synology/http/index.js +++ b/src/driver/controller-synology/http/index.js @@ -143,6 +143,111 @@ class SynologyHttpClient { } } + async GetLunIDByName(name) { + const lun_list = { + api: "SYNO.Core.ISCSI.LUN", + version: "1", + method: "list", + }; + + let response = await this.do_request("GET", "entry.cgi", lun_list); + let lun = response.body.data.luns.find((i) => { + return i.name == name; + }); + + if (lun) { + return lun.lun_id; + } + } + + async GetLunByName(name) { + const lun_list = { + api: "SYNO.Core.ISCSI.LUN", + version: "1", + method: "list", + }; + + let 
response = await this.do_request("GET", "entry.cgi", lun_list); + let lun = response.body.data.luns.find((i) => { + return i.name == name; + }); + + if (lun) { + return lun; + } + } + + async GetSnapshotByLunIDAndName(lun_id, name) { + const get_snapshot_info = { + lid: lun_id, //check? + api: "SYNO.Core.Storage.iSCSILUN", + method: "load_snapshot", + version: 1, + }; + + let response = await this.do_request("GET", "entry.cgi", get_snapshot_info); + + if (response.body.data) { + let snapshot = response.body.data.find((i) => { + return i.desc == name; + }); + + if (snapshot) { + return snapshot; + } + } + } + + async GetSnapshotByLunIDAndSnapshotUUID(lun_id, snapshot_uuid) { + const get_snapshot_info = { + lid: lun_id, //check? + api: "SYNO.Core.Storage.iSCSILUN", + method: "load_snapshot", + version: 1, + }; + + let response = await this.do_request("GET", "entry.cgi", get_snapshot_info); + + if (response.body.data) { + let snapshot = response.body.data.find((i) => { + return i.uuid == snapshot_uuid; + }); + + if (snapshot) { + return snapshot; + } + } + } + + async DeleteSnapshot(snapshot_uuid) { + const iscsi_snapshot_delete = { + api: "SYNO.Core.ISCSI.LUN", + method: "delete_snapshot", + version: 1, + snapshot_uuid: snapshot_uuid, // snapshot_id + deleted_by: "democratic_csi", // ? + }; + + let response = await this.do_request( + "GET", + "entry.cgi", + iscsi_snapshot_delete + ); + // return? 
+ } + + async GetVolumeInfo(volume_path) { + let data = { + api: "SYNO.Core.Storage.Volume", + method: "get", + version: "1", + //volume_path: "/volume1", + volume_path, + }; + + return await this.do_request("GET", "entry.cgi", data); + } + async GetTargetByTargetID(target_id) { let targets = await this.ListTargets(); let target = targets.find((i) => { @@ -237,18 +342,30 @@ class SynologyHttpClient { //is_soft_feas_ignored: false, is_soft_feas_ignored: true, }; - try { - await this.do_request("GET", "entry.cgi", iscsi_lun_delete); - } catch (err) { - /** - * 18990710 = already gone - * LUN_BAD_LUN_UUID = 18990505 - * LUN_NO_SUCH_SNAPSHOT = 18990532 - */ - if (![18990505].includes(err.body.error.code)) { - throw err; - } - } + + await this.do_request("GET", "entry.cgi", iscsi_lun_delete); + + // } catch (err) { + // /** + // * 18990710 = already gone + // * LUN_BAD_LUN_UUID = 18990505 + // * LUN_NO_SUCH_SNAPSHOT = 18990532 + // *//* + // if (![18990505].includes(err.body.error.code)) { + // throw err; + // } + // } + // */ + } + + async CreateSnapshot(data) { + data = Object.assign({}, data, { + api: "SYNO.Core.ISCSI.LUN", + method: "take_snapshot", + version: 1, + }); + + return await this.do_request("GET", "entry.cgi", data); } async CreateTarget(data = {}) { @@ -311,9 +428,9 @@ class SynologyHttpClient { /** * 18990710 = non-existant */ - if (![18990710].includes(err.body.error.code)) { - throw err; - } + //if (![18990710].includes(err.body.error.code)) { + throw err; + //} } } @@ -324,7 +441,7 @@ class SynologyHttpClient { version: 1, }; - await this.do_request( + return await this.do_request( "GET", "entry.cgi", Object.assign({}, iscsi_lun_extend, { uuid: uuid, new_size: size }) diff --git a/src/driver/controller-synology/index.js b/src/driver/controller-synology/index.js index 889e21b..6beefd9 100644 --- a/src/driver/controller-synology/index.js +++ b/src/driver/controller-synology/index.js @@ -53,8 +53,8 @@ class ControllerSynologyDriver extends CsiBaseDriver 
{ "CREATE_DELETE_VOLUME", //"PUBLISH_UNPUBLISH_VOLUME", //"LIST_VOLUMES", - //"GET_CAPACITY", - //"CREATE_DELETE_SNAPSHOT", + "GET_CAPACITY", + "CREATE_DELETE_SNAPSHOT", //"LIST_SNAPSHOTS", //"CLONE_VOLUME", //"PUBLISH_READONLY", @@ -69,7 +69,7 @@ class ControllerSynologyDriver extends CsiBaseDriver { //"UNKNOWN", "STAGE_UNSTAGE_VOLUME", "GET_VOLUME_STATS", - //"EXPAND_VOLUME" + "EXPAND_VOLUME", ]; } } @@ -407,13 +407,15 @@ class ControllerSynologyDriver extends CsiBaseDriver { let iscsiName = driver.buildIscsiName(name); let iqn = driver.options.iscsi.baseiqn + iscsiName; - response = await httpClient.GetTargetByIQN(iqn); - if (response) { - await httpClient.DeleteTarget(response.target_id); + let target = await httpClient.GetTargetByIQN(iqn); + if (target) { + await httpClient.DeleteTarget(target.target_id); } - response = await httpClient.GetLunUUIDByName(iscsiName); - await httpClient.DeleteLun(response); + let lun_uuid = await httpClient.GetLunUUIDByName(iscsiName); + if (lun_uuid) { + await httpClient.DeleteLun(lun_uuid); + } break; default: throw new GrpcError( @@ -523,10 +525,33 @@ class ControllerSynologyDriver extends CsiBaseDriver { * @param {*} call */ async GetCapacity(call) { - throw new GrpcError( - grpc.status.UNIMPLEMENTED, - `operation not supported by driver` + // throw new GrpcError( + // grpc.status.UNIMPLEMENTED, + // `operation not supported by driver` + // ); + + const driver = this; + const httpClient = await driver.getHttpClient(); + + if (!driver.options.synology.location) { + throw new GrpcError( + grpc.status.FAILED_PRECONDITION, + `invalid configuration: missing location` + ); + } + + if (call.request.volume_capabilities) { + const result = this.assertCapabilities(call.request.volume_capabilities); + + if (result.valid !== true) { + return { available_capacity: 0 }; + } + } + + let response = await httpClient.GetVolumeInfo( + driver.options.synology.location ); + return { available_capacity: response.body.data.volume.size_free_byte }; 
} /** @@ -558,11 +583,8 @@ class ControllerSynologyDriver extends CsiBaseDriver { * @param {*} call */ async CreateSnapshot(call) { - throw new GrpcError( - grpc.status.UNIMPLEMENTED, - `operation not supported by driver` - ); const driver = this; + const httpClient = await driver.getHttpClient(); // both these are required let source_volume_id = call.request.source_volume_id; @@ -596,7 +618,47 @@ class ControllerSynologyDriver extends CsiBaseDriver { ); } - // TODO: create snapshot here + // create snapshot here + + let iscsiName = driver.buildIscsiName(source_volume_id); + let lun = await httpClient.GetLunByName(iscsiName); + + if (!lun) { + throw new GrpcError( + grpc.status.INVALID_ARGUMENT, + `invalid source_volume_id: ${source_volume_id}` + ); + } + + // check for already exists + let snapshot = await httpClient.GetSnapshotByLunIDAndName(lun.lun_id, name); + if (snapshot) { + return { + snapshot: { + /** + * The purpose of this field is to give CO guidance on how much space + * is needed to create a volume from this snapshot. + */ + size_bytes: 0, + snapshot_id: `/lun/${lun.lun_id}/${snapshot.uuid}`, // add shanpshot_uuid //fixme + source_volume_id: source_volume_id, + //https://github.com/protocolbuffers/protobuf/blob/master/src/google/protobuf/timestamp.proto + creation_time: { + seconds: snapshot.time, + nanos: 0, + }, + ready_to_use: true, + }, + }; + } + + let data = Object.assign({}, driver.options.iscsi.lunSnapshotAttributes, { + src_lun_uuid: lun.uuid, + taken_by: "democratic-csi", + description: name, //check + }); + + let response = await httpClient.CreateSnapshot(data); return { snapshot: { @@ -605,7 +667,7 @@ class ControllerSynologyDriver extends CsiBaseDriver { * is needed to create a volume from this snapshot. 
*/ size_bytes: 0, - snapshot_id, + snapshot_id: `/lun/${lun.lun_id}/${response.body.data.snapshot_uuid}`, source_volume_id: source_volume_id, //https://github.com/protocolbuffers/protobuf/blob/master/src/google/protobuf/timestamp.proto creation_time: { @@ -624,12 +686,13 @@ class ControllerSynologyDriver extends CsiBaseDriver { * @param {*} call */ async DeleteSnapshot(call) { - throw new GrpcError( - grpc.status.UNIMPLEMENTED, - `operation not supported by driver` - ); + // throw new GrpcError( + // grpc.status.UNIMPLEMENTED, + // `operation not supported by driver` + // ); const driver = this; + const httpClient = await driver.getHttpClient(); const snapshot_id = call.request.snapshot_id; @@ -640,7 +703,19 @@ class ControllerSynologyDriver extends CsiBaseDriver { ); } - // TODO: delete snapshot here + let parts = snapshot_id.split("/"); + let lun_id = parts[2]; + let snapshot_uuid = parts[3]; + + // TODO: delete snapshot + let snapshot = await httpClient.GetSnapshotByLunIDAndSnapshotUUID( + lun_id, + snapshot_uuid + ); + + if (snapshot) { + await httpClient.DeleteSnapshot(snapshot.uuid); + } return {}; } From f849cfae09527846c96fc66ea1d52549f7440b2c Mon Sep 17 00:00:00 2001 From: Travis Glenn Hansen Date: Mon, 26 Jul 2021 21:19:13 -0600 Subject: [PATCH 19/44] fix lun delete issues Signed-off-by: Travis Glenn Hansen --- src/driver/controller-synology/http/index.js | 16 +++------------ src/driver/controller-synology/index.js | 21 ++++++++++---------- 2 files changed, 14 insertions(+), 23 deletions(-) diff --git a/src/driver/controller-synology/http/index.js b/src/driver/controller-synology/http/index.js index 53e9496..60d8be1 100644 --- a/src/driver/controller-synology/http/index.js +++ b/src/driver/controller-synology/http/index.js @@ -315,6 +315,7 @@ class SynologyHttpClient { method: "map_target", version: "1", }); + iscsi_target_map.uuid = JSON.stringify(iscsi_target_map.uuid); iscsi_target_map.target_ids = JSON.stringify(iscsi_target_map.target_ids); // this 
is mapping from the perspective of the target @@ -337,25 +338,14 @@ class SynologyHttpClient { method: "delete", version: 1, //uuid: uuid, - uuid: "", + uuid: JSON.stringify(""), uuids: JSON.stringify([uuid]), //is_soft_feas_ignored: false, is_soft_feas_ignored: true, + //feasibility_precheck: true, }; await this.do_request("GET", "entry.cgi", iscsi_lun_delete); - - // } catch (err) { - // /** - // * 18990710 = already gone - // * LUN_BAD_LUN_UUID = 18990505 - // * LUN_NO_SUCH_SNAPSHOT = 18990532 - // *//* - // if (![18990505].includes(err.body.error.code)) { - // throw err; - // } - // } - // */ } async CreateSnapshot(data) { diff --git a/src/driver/controller-synology/index.js b/src/driver/controller-synology/index.js index 6beefd9..a8ee3cc 100644 --- a/src/driver/controller-synology/index.js +++ b/src/driver/controller-synology/index.js @@ -25,6 +25,8 @@ class ControllerSynologyDriver extends CsiBaseDriver { options.service.node.capabilities = options.service.node.capabilities || {}; + const driverResourceType = this.getDriverResourceType(); + if (!("service" in options.service.identity.capabilities)) { this.ctx.logger.debug("setting default identity service caps"); @@ -69,8 +71,12 @@ class ControllerSynologyDriver extends CsiBaseDriver { //"UNKNOWN", "STAGE_UNSTAGE_VOLUME", "GET_VOLUME_STATS", - "EXPAND_VOLUME", + //"EXPAND_VOLUME", ]; + + if (driverResourceType == "volume") { + options.service.node.capabilities.rpc.push("EXPAND_VOLUME"); + } } } @@ -277,7 +283,7 @@ class ControllerSynologyDriver extends CsiBaseDriver { // create lun data = Object.assign({}, driver.options.iscsi.lunAttributes, { name: iscsiName, - location: driver.options.synology.location, + location: driver.options.synology.volume, size: capacity_bytes, }); let lun_uuid = await httpClient.CreateLun(data); @@ -525,18 +531,13 @@ class ControllerSynologyDriver extends CsiBaseDriver { * @param {*} call */ async GetCapacity(call) { - // throw new GrpcError( - // grpc.status.UNIMPLEMENTED, - // 
`operation not supported by driver` - // ); - const driver = this; const httpClient = await driver.getHttpClient(); - if (!driver.options.synology.location) { + if (!driver.options.synology.volume) { throw new GrpcError( grpc.status.FAILED_PRECONDITION, - `invalid configuration: missing location` + `invalid configuration: missing volume` ); } @@ -549,7 +550,7 @@ class ControllerSynologyDriver extends CsiBaseDriver { } let response = await httpClient.GetVolumeInfo( - driver.options.synology.location + driver.options.synology.volume ); return { available_capacity: response.body.data.volume.size_free_byte }; } From f42ae49098f7c2249ea7bc9f9727a81c5ad8fd93 Mon Sep 17 00:00:00 2001 From: Travis Glenn Hansen Date: Tue, 27 Jul 2021 00:06:41 -0600 Subject: [PATCH 20/44] more robust volume deletion logic for luns Signed-off-by: Travis Glenn Hansen --- src/driver/controller-synology/http/index.js | 13 +++++++++++ src/driver/controller-synology/index.js | 23 ++++++++++++++++++++ 2 files changed, 36 insertions(+) diff --git a/src/driver/controller-synology/http/index.js b/src/driver/controller-synology/http/index.js index 60d8be1..3aa19f3 100644 --- a/src/driver/controller-synology/http/index.js +++ b/src/driver/controller-synology/http/index.js @@ -348,6 +348,19 @@ class SynologyHttpClient { await this.do_request("GET", "entry.cgi", iscsi_lun_delete); } + async DeleteAllLuns() { + const lun_list = { + api: "SYNO.Core.ISCSI.LUN", + version: "1", + method: "list", + }; + + let response = await this.do_request("GET", "entry.cgi", lun_list); + for (let lun of response.body.data.luns) { + await this.DeleteLun(lun.uuid); + } + } + async CreateSnapshot(data) { data = Object.assign({}, data, { api: "SYNO.Core.ISCSI.LUN", diff --git a/src/driver/controller-synology/index.js b/src/driver/controller-synology/index.js index a8ee3cc..da77c8f 100644 --- a/src/driver/controller-synology/index.js +++ b/src/driver/controller-synology/index.js @@ -1,6 +1,7 @@ const { CsiBaseDriver } = 
require("../index"); const { GrpcError, grpc } = require("../../utils/grpc"); const SynologyHttpClient = require("./http").SynologyHttpClient; +const sleep = require("../../utils/general").sleep; /** * @@ -420,7 +421,29 @@ class ControllerSynologyDriver extends CsiBaseDriver { let lun_uuid = await httpClient.GetLunUUIDByName(iscsiName); if (lun_uuid) { + // this is an async process where a success is returned but delete is happening still behind the scenes + // therefore we continue to search for the lun after delete success call to ensure full deletion await httpClient.DeleteLun(lun_uuid); + + let currentCheck = 0; + let maxChecks = 6; + let waitTimeBetweenChecks = 5 * 1000; + + await sleep(waitTimeBetweenChecks); + lun_uuid = await httpClient.GetLunUUIDByName(iscsiName); + + while (currentCheck <= maxChecks && lun_uuid) { + currentCheck++; + await sleep(waitTimeBetweenChecks); + lun_uuid = await httpClient.GetLunUUIDByName(iscsiName); + } + + if (lun_uuid) { + throw new GrpcError( + grpc.status.UNKNOWN, + `failed to remove lun: ${lun_uuid}` + ); + } } break; default: From ff73606b556f8d07da598e14cb22433d77292a3a Mon Sep 17 00:00:00 2001 From: Travis Glenn Hansen Date: Tue, 27 Jul 2021 09:58:50 -0600 Subject: [PATCH 21/44] allow configuration options for the lun delete settle time Signed-off-by: Travis Glenn Hansen --- src/driver/controller-synology/index.js | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/src/driver/controller-synology/index.js b/src/driver/controller-synology/index.js index da77c8f..b3cdf77 100644 --- a/src/driver/controller-synology/index.js +++ b/src/driver/controller-synology/index.js @@ -411,6 +411,8 @@ class ControllerSynologyDriver extends CsiBaseDriver { ); break; case "iscsi": + //await httpClient.DeleteAllLuns(); + let iscsiName = driver.buildIscsiName(name); let iqn = driver.options.iscsi.baseiqn + iscsiName; @@ -426,13 +428,14 @@ class ControllerSynologyDriver extends CsiBaseDriver { await 
httpClient.DeleteLun(lun_uuid); let currentCheck = 0; - let maxChecks = 6; - let waitTimeBetweenChecks = 5 * 1000; + let settleMaxRetries = driver.options.api.lunDelete.settleMaxRetries || 6; + let settleSeconds = driver.options.api.lunDelete.settleSeconds || 5; + let waitTimeBetweenChecks = settleSeconds * 1000; await sleep(waitTimeBetweenChecks); lun_uuid = await httpClient.GetLunUUIDByName(iscsiName); - while (currentCheck <= maxChecks && lun_uuid) { + while (currentCheck <= settleMaxRetries && lun_uuid) { currentCheck++; await sleep(waitTimeBetweenChecks); lun_uuid = await httpClient.GetLunUUIDByName(iscsiName); From eb2bb1dc0153b17fd8f5289751d9c762713a9ea5 Mon Sep 17 00:00:00 2001 From: Travis Glenn Hansen Date: Tue, 27 Jul 2021 11:07:12 -0600 Subject: [PATCH 22/44] introduce ability to serial api access, allow toggle settle Signed-off-by: Travis Glenn Hansen --- src/driver/controller-synology/http/index.js | 70 ++++++++++++-------- src/driver/controller-synology/index.js | 41 +++++++----- 2 files changed, 66 insertions(+), 45 deletions(-) diff --git a/src/driver/controller-synology/http/index.js b/src/driver/controller-synology/http/index.js index 3aa19f3..66fe124 100644 --- a/src/driver/controller-synology/http/index.js +++ b/src/driver/controller-synology/http/index.js @@ -8,6 +8,7 @@ class SynologyHttpClient { this.options = JSON.parse(JSON.stringify(options)); this.logger = console; this.doLoginMutex = new Mutex(); + this.apiSerializeMutex = new Mutex(); if (false) { setInterval(() => { @@ -50,6 +51,7 @@ class SynologyHttpClient { const client = this; const isAuth = data.api == "SYNO.API.Auth" && data.method == "login"; let sid; + let apiMutexRelease; if (!isAuth) { sid = await this.doLoginMutex.runExclusive(async () => { return await this.login(); @@ -58,8 +60,14 @@ class SynologyHttpClient { const invoke_options = options; + if (!isAuth) { + if (this.options.serialize) { + apiMutexRelease = await this.apiSerializeMutex.acquire(); + } + } + return new 
Promise((resolve, reject) => { - if (data.api != "SYNO.API.Auth") { + if (!isAuth) { data._sid = sid; } @@ -93,36 +101,42 @@ class SynologyHttpClient { break; } - request(options, function (error, response, body) { - client.log_response(...arguments, options); + try { + request(options, function (error, response, body) { + client.log_response(...arguments, options); - if (error) { - reject(error); - } - - if ( - typeof response.body !== "object" && - response.body !== null && - response.headers["content-type"] && - response.headers["content-type"].includes("application/json") - ) { - response.body = JSON.parse(response.body); - } - - if (response.statusCode > 299 || response.statusCode < 200) { - reject(response); - } - - if (response.body.success === false) { - // remove invalid sid - if (response.body.error.code == 119 && sid == client.sid) { - client.sid = null; + if (error) { + reject(error); } - reject(response); - } - resolve(response); - }); + if ( + typeof response.body !== "object" && + response.body !== null && + response.headers["content-type"] && + response.headers["content-type"].includes("application/json") + ) { + response.body = JSON.parse(response.body); + } + + if (response.statusCode > 299 || response.statusCode < 200) { + reject(response); + } + + if (response.body.success === false) { + // remove invalid sid + if (response.body.error.code == 119 && sid == client.sid) { + client.sid = null; + } + reject(response); + } + + resolve(response); + }); + } finally { + if (typeof apiMutexRelease == "function") { + apiMutexRelease(); + } + } }); } diff --git a/src/driver/controller-synology/index.js b/src/driver/controller-synology/index.js index b3cdf77..6504516 100644 --- a/src/driver/controller-synology/index.js +++ b/src/driver/controller-synology/index.js @@ -305,6 +305,8 @@ class ControllerSynologyDriver extends CsiBaseDriver { ); } + target_id = target.target_id; + // check if mapping of lun <-> target already exists lun_mapping = 
target.mapped_luns.find((lun) => { return lun.lun_uuid == lun_uuid; @@ -411,7 +413,7 @@ class ControllerSynologyDriver extends CsiBaseDriver { ); break; case "iscsi": - //await httpClient.DeleteAllLuns(); + await httpClient.DeleteAllLuns(); let iscsiName = driver.buildIscsiName(name); let iqn = driver.options.iscsi.baseiqn + iscsiName; @@ -427,25 +429,30 @@ class ControllerSynologyDriver extends CsiBaseDriver { // therefore we continue to search for the lun after delete success call to ensure full deletion await httpClient.DeleteLun(lun_uuid); - let currentCheck = 0; - let settleMaxRetries = driver.options.api.lunDelete.settleMaxRetries || 6; - let settleSeconds = driver.options.api.lunDelete.settleSeconds || 5; - let waitTimeBetweenChecks = settleSeconds * 1000; + let settleEnabled = driver.options.api.lunDelete.settleEnabled; - await sleep(waitTimeBetweenChecks); - lun_uuid = await httpClient.GetLunUUIDByName(iscsiName); - - while (currentCheck <= settleMaxRetries && lun_uuid) { - currentCheck++; + if (settleEnabled) { + let currentCheck = 0; + let settleMaxRetries = + driver.options.api.lunDelete.settleMaxRetries || 6; + let settleSeconds = driver.options.api.lunDelete.settleSeconds || 5; + let waitTimeBetweenChecks = settleSeconds * 1000; + await sleep(waitTimeBetweenChecks); lun_uuid = await httpClient.GetLunUUIDByName(iscsiName); - } - - if (lun_uuid) { - throw new GrpcError( - grpc.status.UNKNOWN, - `failed to remove lun: ${lun_uuid}` - ); + + while (currentCheck <= settleMaxRetries && lun_uuid) { + currentCheck++; + await sleep(waitTimeBetweenChecks); + lun_uuid = await httpClient.GetLunUUIDByName(iscsiName); + } + + if (lun_uuid) { + throw new GrpcError( + grpc.status.UNKNOWN, + `failed to remove lun: ${lun_uuid}` + ); + } } } break; From a1ddfda03d5b5747b58d59418e517466ba48c37f Mon Sep 17 00:00:00 2001 From: Travis Glenn Hansen Date: Tue, 27 Jul 2021 11:08:15 -0600 Subject: [PATCH 23/44] remove deleteallluns call Signed-off-by: Travis Glenn Hansen --- 
src/driver/controller-synology/index.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/driver/controller-synology/index.js b/src/driver/controller-synology/index.js index 6504516..f405fab 100644 --- a/src/driver/controller-synology/index.js +++ b/src/driver/controller-synology/index.js @@ -413,7 +413,7 @@ class ControllerSynologyDriver extends CsiBaseDriver { ); break; case "iscsi": - await httpClient.DeleteAllLuns(); + //await httpClient.DeleteAllLuns(); let iscsiName = driver.buildIscsiName(name); let iqn = driver.options.iscsi.baseiqn + iscsiName; From 7bcb24cb7c9aa10381c647f83e45212f0b52e8cc Mon Sep 17 00:00:00 2001 From: Hunter Madsen Date: Thu, 5 Aug 2021 22:07:50 -0600 Subject: [PATCH 24/44] Added support for cloning --- src/driver/controller-synology/http/index.js | 47 +++++++++++++ src/driver/controller-synology/index.js | 69 +++++++++++++++++--- 2 files changed, 106 insertions(+), 10 deletions(-) diff --git a/src/driver/controller-synology/http/index.js b/src/driver/controller-synology/http/index.js index 66fe124..6f45ed2 100644 --- a/src/driver/controller-synology/http/index.js +++ b/src/driver/controller-synology/http/index.js @@ -174,6 +174,23 @@ class SynologyHttpClient { } } + async GetLunByID(lun_id) { + const lun_list = { + api: "SYNO.Core.ISCSI.LUN", + version: "1", + method: "list", + }; + + let response = await this.do_request("GET", "entry.cgi", lun_list); + let lun = response.body.data.luns.find((i) => { + return i.lun_id == lun_id; + }); + + if (lun) { + return lun; + } + } + async GetLunByName(name) { const lun_list = { api: "SYNO.Core.ISCSI.LUN", @@ -464,6 +481,36 @@ class SynologyHttpClient { Object.assign({}, iscsi_lun_extend, { uuid: uuid, new_size: size }) ); } + + + + async CreateClonedVolume(src_lun_uuid, dst_lun_name) { + const create_cloned_volume = { + api: "SYNO.Core.ISCSI.LUN", + version: 1, + method: "clone", + src_lun_uuid: JSON.stringify(src_lun_uuid), // src lun uuid + dst_lun_name: dst_lun_name, // dst 
lun name + is_same_pool: true, // always true? string? + clone_type: "democratic-csi", // check + } + return await this.do_request("GET", "entry.cgi", create_cloned_volume); + } + + async CreateVolumeFromSnapshot(src_lun_uuid, snapshot_uuid, cloned_lun_name) { + const create_volume_from_snapshot = { + api: "SYNO.Core.ISCSI.LUN", + version: 1, + method: "clone_snapshot", + src_lun_uuid: src_lun_uuid, // src lun uuid, snapshot id? + snapshot_uuid: JSON.stringify(snapshot_uuid), // shaptop uuid + cloned_lun_name: cloned_lun_name, // cloned lun name + clone_type: "democratic-csi", // check + } + return await this.do_request("GET", "entry.cgi", create_volume_from_snapshot); + } + + } module.exports.SynologyHttpClient = SynologyHttpClient; diff --git a/src/driver/controller-synology/index.js b/src/driver/controller-synology/index.js index f405fab..01158a9 100644 --- a/src/driver/controller-synology/index.js +++ b/src/driver/controller-synology/index.js @@ -280,14 +280,63 @@ class ControllerSynologyDriver extends CsiBaseDriver { let data; let target; let lun_mapping; + let lun_uuid; + let existingLun; - // create lun - data = Object.assign({}, driver.options.iscsi.lunAttributes, { - name: iscsiName, - location: driver.options.synology.volume, - size: capacity_bytes, - }); - let lun_uuid = await httpClient.CreateLun(data); + if (volume_content_source) { + let src_lun_uuid; + let src_lun_id; + switch (volume_content_source.type) { + case "snapshot": + let parts = volume_content_source.snapshot.snapshot_id.split("/"); + src_lun_id = parts[2]; + let snapshot_uuid = parts[3]; + let src_lun = await httpClient.GetLunByID(src_lun_id); + src_lun_uuid = src_lun.uuid; + + existingLun = await httpClient.GetLunByName(iscsiName); + if (!existingLun) { + await httpClient.CreateVolumeFromSnapshot( + src_lun_uuid, + snapshot_uuid, + iscsiName + ); + } + break; + case "volume": + let srcLunName = driver.buildIscsiName( + volume_content_source.volume.volume_id + ); + src_lun_uuid = await 
httpClient.GetLunUUIDByName(srcLunName); + + existingLun = httpClient.GetLunByName(iscsiName); + if (!existingLun) { + await httpClient.CreateClonedVolume(src_lun_uuid, iscsiName); + } + break; + default: + throw new GrpcError( + grpc.status.INVALID_ARGUMENT, + `invalid volume_content_source type: ${volume_content_source.type}` + ); + break; + } + // resize to requested amount + + let lun = await httpClient.GetLunByName(iscsiName); + lun_uuid = lun.uuid; + if (lun.size < capacity_bytes) { + await httpClient.ExpandISCSILun(lun_uuid, capacity_bytes); + } + } else { + // create lun + data = Object.assign({}, driver.options.iscsi.lunAttributes, { + name: iscsiName, + location: driver.options.synology.volume, + size: capacity_bytes, + }); + lun_uuid = await httpClient.CreateLun(data); + } // create target let iqn = driver.options.iscsi.baseiqn + iscsiName; @@ -437,16 +486,16 @@ class ControllerSynologyDriver extends CsiBaseDriver { driver.options.api.lunDelete.settleMaxRetries || 6; let settleSeconds = driver.options.api.lunDelete.settleSeconds || 5; let waitTimeBetweenChecks = settleSeconds * 1000; - + await sleep(waitTimeBetweenChecks); lun_uuid = await httpClient.GetLunUUIDByName(iscsiName); - + while (currentCheck <= settleMaxRetries && lun_uuid) { currentCheck++; await sleep(waitTimeBetweenChecks); lun_uuid = await httpClient.GetLunUUIDByName(iscsiName); } - + if (lun_uuid) { throw new GrpcError( grpc.status.UNKNOWN, From f6f637ebe7e1eec63bc540f1446baa7919d3b32f Mon Sep 17 00:00:00 2001 From: Travis Glenn Hansen Date: Thu, 5 Aug 2021 22:42:46 -0600 Subject: [PATCH 25/44] allow operator management of running fsck before mount during stage, custom mkfs options Signed-off-by: Travis Glenn Hansen --- package-lock.json | 313 +++++++++++-------- package.json | 1 + src/driver/controller-synology/http/index.js | 14 +- src/driver/controller-synology/index.js | 12 +- src/driver/index.js | 41 ++- 5 files changed, 238 insertions(+), 143 deletions(-) diff --git 
a/package-lock.json b/package-lock.json index df139e6..22ca674 100644 --- a/package-lock.json +++ b/package-lock.json @@ -14,6 +14,7 @@ "grpc-uds": "^0.1.6", "handlebars": "^4.7.7", "js-yaml": "^4.0.0", + "lodash": "^4.17.21", "lru-cache": "^6.0.0", "request": "^2.88.2", "semver": "^7.3.4", @@ -37,9 +38,9 @@ } }, "node_modules/@babel/helper-validator-identifier": { - "version": "7.14.5", - "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.14.5.tgz", - "integrity": "sha512-5lsetuxCLilmVGyiLEfoHBRX8UCFD+1m2x3Rj97WrW3V7H3u4RWRXA4evMjImCsin2J2YT0QaVDGf+z8ondbAg==", + "version": "7.14.9", + "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.14.9.tgz", + "integrity": "sha512-pQYxPY0UP6IHISRitNe8bsijHex4TWZXi2HwKVsjPiltzlhse2znVcm9Ace510VT1kxIHjGJCZZQBX2gJDbo0g==", "dev": true, "engines": { "node": ">=6.9.0" @@ -141,9 +142,9 @@ } }, "node_modules/@eslint/eslintrc": { - "version": "0.4.2", - "resolved": "https://registry.npmjs.org/@eslint/eslintrc/-/eslintrc-0.4.2.tgz", - "integrity": "sha512-8nmGq/4ycLpIwzvhI4tNDmQztZ8sp+hI7cyG8i1nQDhkAbRzHpXPidRAHlNvCZQpJTKw5ItIpMw9RSToGF00mg==", + "version": "0.4.3", + "resolved": "https://registry.npmjs.org/@eslint/eslintrc/-/eslintrc-0.4.3.tgz", + "integrity": "sha512-J6KFFz5QCYUJq3pf0mjEcCJVERbzv71PUIDczuh9JkwGEzced6CO5ADLHB1rbf/+oPBtoPfMYNOpGDzCANlbXw==", "dev": true, "dependencies": { "ajv": "^6.12.4", @@ -183,9 +184,9 @@ } }, "node_modules/@grpc/proto-loader": { - "version": "0.6.3", - "resolved": "https://registry.npmjs.org/@grpc/proto-loader/-/proto-loader-0.6.3.tgz", - "integrity": "sha512-AtMWwb7kY8DdtwIQh2hC4YFM1MzZ22lMA+gjbnCYDgICt14vX2tCa59bDrEjFyOI4LvORjpvT/UhHUdKvsX8og==", + "version": "0.6.4", + "resolved": "https://registry.npmjs.org/@grpc/proto-loader/-/proto-loader-0.6.4.tgz", + "integrity": "sha512-7xvDvW/vJEcmLUltCUGOgWRPM8Oofv0eCFSVMuKqaqWJaXSzmB+m9hiyqe34QofAl4WAzIKUZZlinIF9FOHyTQ==", "dependencies": { 
"@types/long": "^4.0.1", "lodash.camelcase": "^4.3.0", @@ -217,6 +218,26 @@ "node": ">=10" } }, + "node_modules/@humanwhocodes/config-array": { + "version": "0.5.0", + "resolved": "https://registry.npmjs.org/@humanwhocodes/config-array/-/config-array-0.5.0.tgz", + "integrity": "sha512-FagtKFz74XrTl7y6HCzQpwDfXP0yhxe9lHLD1UZxjvZIcbyRz8zTFF/yYNfSfzU414eDwZ1SrO0Qvtyf+wFMQg==", + "dev": true, + "dependencies": { + "@humanwhocodes/object-schema": "^1.2.0", + "debug": "^4.1.1", + "minimatch": "^3.0.4" + }, + "engines": { + "node": ">=10.10.0" + } + }, + "node_modules/@humanwhocodes/object-schema": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/@humanwhocodes/object-schema/-/object-schema-1.2.0.tgz", + "integrity": "sha512-wdppn25U8z/2yiaT6YGquE6X8sSv7hNMWSXYSSU1jGv/yd6XqjXgTDJ8KP4NgjTXfJ3GbRjeeb8RTV7a/VpM+w==", + "dev": true + }, "node_modules/@protobufjs/aspromise": { "version": "1.1.2", "resolved": "https://registry.npmjs.org/@protobufjs/aspromise/-/aspromise-1.1.2.tgz", @@ -277,9 +298,9 @@ "integrity": "sha512-5tXH6Bx/kNGd3MgffdmP4dy2Z+G4eaXw0SE81Tq3BNadtnMR5/ySMzX4SLEzHJzSmPNn4HIdpQsBvXMUykr58w==" }, "node_modules/@types/node": { - "version": "15.12.4", - "resolved": "https://registry.npmjs.org/@types/node/-/node-15.12.4.tgz", - "integrity": "sha512-zrNj1+yqYF4WskCMOHwN+w9iuD12+dGm0rQ35HLl9/Ouuq52cEtd0CH9qMgrdNmi5ejC1/V7vKEXYubB+65DkA==" + "version": "16.4.13", + "resolved": "https://registry.npmjs.org/@types/node/-/node-16.4.13.tgz", + "integrity": "sha512-bLL69sKtd25w7p1nvg9pigE4gtKVpGTPojBFLMkGHXuUgap2sLqQt2qUnqmVCDfzGUL0DRNZP+1prIZJbMeAXg==" }, "node_modules/acorn": { "version": "7.4.1", @@ -294,9 +315,9 @@ } }, "node_modules/acorn-jsx": { - "version": "5.3.1", - "resolved": "https://registry.npmjs.org/acorn-jsx/-/acorn-jsx-5.3.1.tgz", - "integrity": "sha512-K0Ptm/47OKfQRpNQ2J/oIN/3QYiK6FwW+eJbILhsdxh2WTLdl+30o8aGdTbm5JbffpFFAg/g+zi1E+jvJha5ng==", + "version": "5.3.2", + "resolved": "https://registry.npmjs.org/acorn-jsx/-/acorn-jsx-5.3.2.tgz", 
+ "integrity": "sha512-rq9s+JNhf0IChjtDXxllJ7g41oZk5SlXtp0LHwyA5cejwn7vKmKp4pPri6YEePv2PU65sAsegbXtIinmDFDXgQ==", "dev": true, "peerDependencies": { "acorn": "^6.0.0 || ^7.0.0 || ^8.0.0" @@ -388,9 +409,9 @@ } }, "node_modules/async": { - "version": "3.2.0", - "resolved": "https://registry.npmjs.org/async/-/async-3.2.0.tgz", - "integrity": "sha512-TR2mEZFVOj2pLStYxLht7TyfuRzaydfpxr3k9RpHIzMgw7A64dzsdqCxH1WJyQdoe8T10nDXd9wnEigmiuHIZw==" + "version": "3.2.1", + "resolved": "https://registry.npmjs.org/async/-/async-3.2.1.tgz", + "integrity": "sha512-XdD5lRO/87udXCMC9meWdYiR+Nq6ZjUfXidViUZGu2F1MO4T3XwZ1et0hb2++BgLfhyJwy44BGB/yx80ABx8hg==" }, "node_modules/async-mutex": { "version": "0.3.1", @@ -499,9 +520,9 @@ "integrity": "sha1-G2gcIf+EAzyCZUMJBolCDRhxUdw=" }, "node_modules/chalk": { - "version": "4.1.1", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.1.tgz", - "integrity": "sha512-diHzdDKxcU+bAsUboHLPEDQiw0qEe0qd7SYUn3HgcFlWgbDcfLGswOHYeGrHKzG9z6UYf01d9VFMfZxPM1xZSg==", + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", + "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", "dev": true, "dependencies": { "ansi-styles": "^4.1.0", @@ -558,9 +579,9 @@ "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==" }, "node_modules/color-string": { - "version": "1.5.5", - "resolved": "https://registry.npmjs.org/color-string/-/color-string-1.5.5.tgz", - "integrity": "sha512-jgIoum0OfQfq9Whcfc2z/VhCNcmQjWbey6qBX0vqt7YICflUmBCh9E9CiQD5GSJ+Uehixm3NUwHVhqUAWRivZg==", + "version": "1.6.0", + "resolved": "https://registry.npmjs.org/color-string/-/color-string-1.6.0.tgz", + "integrity": "sha512-c/hGS+kRWJutUBEngKKmk4iH3sD59MBkoxVapS/0wgpCz2u7XsNloxknyvBhzwEs1IbV36D9PwqLPJ2DTu3vMA==", "dependencies": { "color-name": "^1.0.0", "simple-swizzle": "^0.2.2" @@ -664,9 +685,9 @@ } }, "node_modules/debug": { - "version": "4.3.1", - 
"resolved": "https://registry.npmjs.org/debug/-/debug-4.3.1.tgz", - "integrity": "sha512-doEwdvm4PCeK4K3RQN2ZC2BYUBaxwLARCqZmMjtF8a51J2Rb0xpVloFRnCODwqjpwnAoao4pelN8l3RJdv3gRQ==", + "version": "4.3.2", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.2.tgz", + "integrity": "sha512-mOp8wKcvj7XxC78zLgw/ZA+6TSgkoE2C/ienthhRD298T7UNwAg9diBpLRxC0mOezLl4B0xV7M0cCO6P/O0Xhw==", "dev": true, "dependencies": { "ms": "2.1.2" @@ -779,13 +800,14 @@ } }, "node_modules/eslint": { - "version": "7.29.0", - "resolved": "https://registry.npmjs.org/eslint/-/eslint-7.29.0.tgz", - "integrity": "sha512-82G/JToB9qIy/ArBzIWG9xvvwL3R86AlCjtGw+A29OMZDqhTybz/MByORSukGxeI+YPCR4coYyITKk8BFH9nDA==", + "version": "7.32.0", + "resolved": "https://registry.npmjs.org/eslint/-/eslint-7.32.0.tgz", + "integrity": "sha512-VHZ8gX+EDfz+97jGcgyGCyRia/dPOd6Xh9yPv8Bl1+SoaIwD+a/vlrOmGRUyOYu7MwUhc7CxqeaDZU13S4+EpA==", "dev": true, "dependencies": { "@babel/code-frame": "7.12.11", - "@eslint/eslintrc": "^0.4.2", + "@eslint/eslintrc": "^0.4.3", + "@humanwhocodes/config-array": "^0.5.0", "ajv": "^6.10.0", "chalk": "^4.0.0", "cross-spawn": "^7.0.2", @@ -1028,9 +1050,9 @@ "dev": true }, "node_modules/fast-safe-stringify": { - "version": "2.0.7", - "resolved": "https://registry.npmjs.org/fast-safe-stringify/-/fast-safe-stringify-2.0.7.tgz", - "integrity": "sha512-Utm6CdzT+6xsDk2m8S6uL8VHxNwI6Jub+e9NYTcAms28T84pTa25GJQV9j0CY0N1rM8hK4x6grpF2BQf+2qwVA==" + "version": "2.0.8", + "resolved": "https://registry.npmjs.org/fast-safe-stringify/-/fast-safe-stringify-2.0.8.tgz", + "integrity": "sha512-lXatBjf3WPjmWD6DpIZxkeSsCOwqI0maYMpgDlx8g4U2qi4lbjA9oH/HD2a87G+KfsUmo5WbJFmqBZlPxtptag==" }, "node_modules/fecha": { "version": "4.2.1", @@ -1063,9 +1085,9 @@ } }, "node_modules/flatted": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/flatted/-/flatted-3.1.1.tgz", - "integrity": "sha512-zAoAQiudy+r5SvnSw3KJy5os/oRJYHzrzja/tBDqrZtNhUw8bt6y8OBzMWcjWr+8liV8Eb6yOhw8WZ7VFZ5ZzA==", + "version": "3.2.2", + 
"resolved": "https://registry.npmjs.org/flatted/-/flatted-3.2.2.tgz", + "integrity": "sha512-JaTY/wtrcSyvXJl4IMFHPKyFur1sE9AUqc0QnhOaJ0CxHtAoIV8pYDzeEfAaNEtGkOfq4gr3LBFmdXW5mOQFnA==", "dev": true }, "node_modules/fn.name": { @@ -1153,9 +1175,9 @@ } }, "node_modules/globals": { - "version": "13.9.0", - "resolved": "https://registry.npmjs.org/globals/-/globals-13.9.0.tgz", - "integrity": "sha512-74/FduwI/JaIrr1H8e71UbDE+5x7pIPs1C2rrwC52SszOo043CsWOZEMW7o2Y58xwm9b+0RBKDxY5n2sUpEFxA==", + "version": "13.10.0", + "resolved": "https://registry.npmjs.org/globals/-/globals-13.10.0.tgz", + "integrity": "sha512-piHC3blgLGFjvOuMmWZX60f+na1lXFDhQXBf1UYp2fXPXqvEUbOhNwi6BsQ0bQishwedgnjkwv1d9zKf+MWw3g==", "dev": true, "dependencies": { "type-fest": "^0.20.2" @@ -1951,11 +1973,14 @@ } }, "node_modules/is-stream": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-2.0.0.tgz", - "integrity": "sha512-XCoy+WlUr7d1+Z8GgSuXmpuUFC9fOhRXglJMx+dwLKTkL44Cjd4W1Z5P+BQZpr+cR93aGP4S/s7Ftw6Nd/kiEw==", + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-2.0.1.tgz", + "integrity": "sha512-hFoiJiTl63nn+kstHGBtewWSKnQLpyb155KHheA1l39uvtO9nWIop1p3udqPcUd/xbF1VLMO4n7OI6p7RbngDg==", "engines": { "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" } }, "node_modules/is-typedarray": { @@ -2065,6 +2090,11 @@ "node": ">= 0.8.0" } }, + "node_modules/lodash": { + "version": "4.17.21", + "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.21.tgz", + "integrity": "sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==" + }, "node_modules/lodash.camelcase": { "version": "4.3.0", "resolved": "https://registry.npmjs.org/lodash.camelcase/-/lodash.camelcase-4.3.0.tgz", @@ -2122,19 +2152,19 @@ } }, "node_modules/mime-db": { - "version": "1.48.0", - "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.48.0.tgz", - "integrity": 
"sha512-FM3QwxV+TnZYQ2aRqhlKBMHxk10lTbMt3bBkMAp54ddrNeVSfcQYOOKuGuy3Ddrm38I04If834fOUSq1yzslJQ==", + "version": "1.49.0", + "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.49.0.tgz", + "integrity": "sha512-CIc8j9URtOVApSFCQIF+VBkX1RwXp/oMMOrqdyXSBXq5RWNEsRfyj1kiRnQgmNXmHxPoFIxOroKA3zcU9P+nAA==", "engines": { "node": ">= 0.6" } }, "node_modules/mime-types": { - "version": "2.1.31", - "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.31.tgz", - "integrity": "sha512-XGZnNzm3QvgKxa8dpzyhFTHmpP3l5YNusmne07VUOXxou9CqUqYa/HBy124RqtVh/O2pECas/MOcsDgpilPOPg==", + "version": "2.1.32", + "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.32.tgz", + "integrity": "sha512-hJGaVS4G4c9TSMYh2n6SQAGrC4RnfU+daP8G7cSCmaqNjiOoUY0VHCMS42pxnQmVF1GWwFhbHWn3RIxCqTmZ9A==", "dependencies": { - "mime-db": "1.48.0" + "mime-db": "1.49.0" }, "engines": { "node": ">= 0.6" @@ -2225,9 +2255,9 @@ } }, "node_modules/nan": { - "version": "2.14.2", - "resolved": "https://registry.npmjs.org/nan/-/nan-2.14.2.tgz", - "integrity": "sha512-M2ufzIiINKCuDfBSAUr1vWQ+vuVcA9kqx8JJUsbQi6yf1uGRyb7HfpdfUr5qLXf3B/t8dPvcjhKMmlfnP47EzQ==" + "version": "2.15.0", + "resolved": "https://registry.npmjs.org/nan/-/nan-2.15.0.tgz", + "integrity": "sha512-8ZtvEnA2c5aYCZYd1cvgdnU6cqwixRoYg70xPLWUws5ORTa/lnw+u4amixRS/Ac5U5mQVgp9pnlSUnbNWFaWZQ==" }, "node_modules/natural-compare": { "version": "1.4.0", @@ -2628,9 +2658,9 @@ "dev": true }, "node_modules/ssh2": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/ssh2/-/ssh2-1.1.0.tgz", - "integrity": "sha512-CidQLG2ZacoT0Z7O6dOyisj4JdrOrLVJ4KbHjVNz9yI1vO08FAYQPcnkXY9BP8zeYo+J/nBgY6Gg4R7w4WFWtg==", + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/ssh2/-/ssh2-1.2.0.tgz", + "integrity": "sha512-vklfVRyylayGV/zMwVEkTC9kBhA3t264hoUHV/yGuunBJh6uBGP1VlzhOp8EsqxpKnG0xkLE1qHZlU0+t8Vh6Q==", "hasInstallScript": true, "dependencies": { "asn1": "^0.2.4", @@ -2750,9 +2780,9 @@ } }, "node_modules/table/node_modules/ajv": 
{ - "version": "8.6.0", - "resolved": "https://registry.npmjs.org/ajv/-/ajv-8.6.0.tgz", - "integrity": "sha512-cnUG4NSBiM4YFBxgZIj/In3/6KX+rQ2l2YPRVcvAMQGWEPKuXoPIhxzwqh31jA3IPbI4qEOp/5ILI4ynioXsGQ==", + "version": "8.6.2", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-8.6.2.tgz", + "integrity": "sha512-9807RlWAgT564wT+DjeyU5OFMPjmzxVobvDFmNAhY+5zD6A2ly3jDp6sgnfyDtlIQ+7H97oc/DGCzzfu9rjw9w==", "dev": true, "dependencies": { "fast-deep-equal": "^3.1.1", @@ -2845,9 +2875,9 @@ } }, "node_modules/uglify-js": { - "version": "3.13.9", - "resolved": "https://registry.npmjs.org/uglify-js/-/uglify-js-3.13.9.tgz", - "integrity": "sha512-wZbyTQ1w6Y7fHdt8sJnHfSIuWeDgk6B5rCb4E/AM6QNNPbOMIZph21PW5dRB3h7Df0GszN+t7RuUH6sWK5bF0g==", + "version": "3.14.1", + "resolved": "https://registry.npmjs.org/uglify-js/-/uglify-js-3.14.1.tgz", + "integrity": "sha512-JhS3hmcVaXlp/xSo3PKY5R0JqKs5M3IV+exdLHW99qKvKivPO4Z8qbej6mte17SOPqAOVMjt/XGgWacnFSzM3g==", "optional": true, "bin": { "uglifyjs": "bin/uglifyjs" @@ -3029,9 +3059,9 @@ "integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==" }, "node_modules/yargs": { - "version": "17.0.1", - "resolved": "https://registry.npmjs.org/yargs/-/yargs-17.0.1.tgz", - "integrity": "sha512-xBBulfCc8Y6gLFcrPvtqKz9hz8SO0l1Ni8GgDekvBX2ro0HRQImDGnikfc33cgzcYUSncapnNcZDjVFIH3f6KQ==", + "version": "17.1.0", + "resolved": "https://registry.npmjs.org/yargs/-/yargs-17.1.0.tgz", + "integrity": "sha512-SQr7qqmQ2sNijjJGHL4u7t8vyDZdZ3Ahkmo4sc1w5xI9TBX0QDdG/g4SFnxtWOsGLjwHQue57eFALfwFCnixgg==", "dependencies": { "cliui": "^7.0.2", "escalade": "^3.1.1", @@ -3065,9 +3095,9 @@ } }, "@babel/helper-validator-identifier": { - "version": "7.14.5", - "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.14.5.tgz", - "integrity": "sha512-5lsetuxCLilmVGyiLEfoHBRX8UCFD+1m2x3Rj97WrW3V7H3u4RWRXA4evMjImCsin2J2YT0QaVDGf+z8ondbAg==", + "version": "7.14.9", + "resolved": 
"https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.14.9.tgz", + "integrity": "sha512-pQYxPY0UP6IHISRitNe8bsijHex4TWZXi2HwKVsjPiltzlhse2znVcm9Ace510VT1kxIHjGJCZZQBX2gJDbo0g==", "dev": true }, "@babel/highlight": { @@ -3150,9 +3180,9 @@ } }, "@eslint/eslintrc": { - "version": "0.4.2", - "resolved": "https://registry.npmjs.org/@eslint/eslintrc/-/eslintrc-0.4.2.tgz", - "integrity": "sha512-8nmGq/4ycLpIwzvhI4tNDmQztZ8sp+hI7cyG8i1nQDhkAbRzHpXPidRAHlNvCZQpJTKw5ItIpMw9RSToGF00mg==", + "version": "0.4.3", + "resolved": "https://registry.npmjs.org/@eslint/eslintrc/-/eslintrc-0.4.3.tgz", + "integrity": "sha512-J6KFFz5QCYUJq3pf0mjEcCJVERbzv71PUIDczuh9JkwGEzced6CO5ADLHB1rbf/+oPBtoPfMYNOpGDzCANlbXw==", "dev": true, "requires": { "ajv": "^6.12.4", @@ -3188,9 +3218,9 @@ } }, "@grpc/proto-loader": { - "version": "0.6.3", - "resolved": "https://registry.npmjs.org/@grpc/proto-loader/-/proto-loader-0.6.3.tgz", - "integrity": "sha512-AtMWwb7kY8DdtwIQh2hC4YFM1MzZ22lMA+gjbnCYDgICt14vX2tCa59bDrEjFyOI4LvORjpvT/UhHUdKvsX8og==", + "version": "0.6.4", + "resolved": "https://registry.npmjs.org/@grpc/proto-loader/-/proto-loader-0.6.4.tgz", + "integrity": "sha512-7xvDvW/vJEcmLUltCUGOgWRPM8Oofv0eCFSVMuKqaqWJaXSzmB+m9hiyqe34QofAl4WAzIKUZZlinIF9FOHyTQ==", "requires": { "@types/long": "^4.0.1", "lodash.camelcase": "^4.3.0", @@ -3215,6 +3245,23 @@ } } }, + "@humanwhocodes/config-array": { + "version": "0.5.0", + "resolved": "https://registry.npmjs.org/@humanwhocodes/config-array/-/config-array-0.5.0.tgz", + "integrity": "sha512-FagtKFz74XrTl7y6HCzQpwDfXP0yhxe9lHLD1UZxjvZIcbyRz8zTFF/yYNfSfzU414eDwZ1SrO0Qvtyf+wFMQg==", + "dev": true, + "requires": { + "@humanwhocodes/object-schema": "^1.2.0", + "debug": "^4.1.1", + "minimatch": "^3.0.4" + } + }, + "@humanwhocodes/object-schema": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/@humanwhocodes/object-schema/-/object-schema-1.2.0.tgz", + "integrity": 
"sha512-wdppn25U8z/2yiaT6YGquE6X8sSv7hNMWSXYSSU1jGv/yd6XqjXgTDJ8KP4NgjTXfJ3GbRjeeb8RTV7a/VpM+w==", + "dev": true + }, "@protobufjs/aspromise": { "version": "1.1.2", "resolved": "https://registry.npmjs.org/@protobufjs/aspromise/-/aspromise-1.1.2.tgz", @@ -3275,9 +3322,9 @@ "integrity": "sha512-5tXH6Bx/kNGd3MgffdmP4dy2Z+G4eaXw0SE81Tq3BNadtnMR5/ySMzX4SLEzHJzSmPNn4HIdpQsBvXMUykr58w==" }, "@types/node": { - "version": "15.12.4", - "resolved": "https://registry.npmjs.org/@types/node/-/node-15.12.4.tgz", - "integrity": "sha512-zrNj1+yqYF4WskCMOHwN+w9iuD12+dGm0rQ35HLl9/Ouuq52cEtd0CH9qMgrdNmi5ejC1/V7vKEXYubB+65DkA==" + "version": "16.4.13", + "resolved": "https://registry.npmjs.org/@types/node/-/node-16.4.13.tgz", + "integrity": "sha512-bLL69sKtd25w7p1nvg9pigE4gtKVpGTPojBFLMkGHXuUgap2sLqQt2qUnqmVCDfzGUL0DRNZP+1prIZJbMeAXg==" }, "acorn": { "version": "7.4.1", @@ -3286,9 +3333,9 @@ "dev": true }, "acorn-jsx": { - "version": "5.3.1", - "resolved": "https://registry.npmjs.org/acorn-jsx/-/acorn-jsx-5.3.1.tgz", - "integrity": "sha512-K0Ptm/47OKfQRpNQ2J/oIN/3QYiK6FwW+eJbILhsdxh2WTLdl+30o8aGdTbm5JbffpFFAg/g+zi1E+jvJha5ng==", + "version": "5.3.2", + "resolved": "https://registry.npmjs.org/acorn-jsx/-/acorn-jsx-5.3.2.tgz", + "integrity": "sha512-rq9s+JNhf0IChjtDXxllJ7g41oZk5SlXtp0LHwyA5cejwn7vKmKp4pPri6YEePv2PU65sAsegbXtIinmDFDXgQ==", "dev": true, "requires": {} }, @@ -3356,9 +3403,9 @@ "dev": true }, "async": { - "version": "3.2.0", - "resolved": "https://registry.npmjs.org/async/-/async-3.2.0.tgz", - "integrity": "sha512-TR2mEZFVOj2pLStYxLht7TyfuRzaydfpxr3k9RpHIzMgw7A64dzsdqCxH1WJyQdoe8T10nDXd9wnEigmiuHIZw==" + "version": "3.2.1", + "resolved": "https://registry.npmjs.org/async/-/async-3.2.1.tgz", + "integrity": "sha512-XdD5lRO/87udXCMC9meWdYiR+Nq6ZjUfXidViUZGu2F1MO4T3XwZ1et0hb2++BgLfhyJwy44BGB/yx80ABx8hg==" }, "async-mutex": { "version": "0.3.1", @@ -3448,9 +3495,9 @@ "integrity": "sha1-G2gcIf+EAzyCZUMJBolCDRhxUdw=" }, "chalk": { - "version": "4.1.1", - "resolved": 
"https://registry.npmjs.org/chalk/-/chalk-4.1.1.tgz", - "integrity": "sha512-diHzdDKxcU+bAsUboHLPEDQiw0qEe0qd7SYUn3HgcFlWgbDcfLGswOHYeGrHKzG9z6UYf01d9VFMfZxPM1xZSg==", + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", + "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", "dev": true, "requires": { "ansi-styles": "^4.1.0", @@ -3509,9 +3556,9 @@ "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==" }, "color-string": { - "version": "1.5.5", - "resolved": "https://registry.npmjs.org/color-string/-/color-string-1.5.5.tgz", - "integrity": "sha512-jgIoum0OfQfq9Whcfc2z/VhCNcmQjWbey6qBX0vqt7YICflUmBCh9E9CiQD5GSJ+Uehixm3NUwHVhqUAWRivZg==", + "version": "1.6.0", + "resolved": "https://registry.npmjs.org/color-string/-/color-string-1.6.0.tgz", + "integrity": "sha512-c/hGS+kRWJutUBEngKKmk4iH3sD59MBkoxVapS/0wgpCz2u7XsNloxknyvBhzwEs1IbV36D9PwqLPJ2DTu3vMA==", "requires": { "color-name": "^1.0.0", "simple-swizzle": "^0.2.2" @@ -3583,9 +3630,9 @@ } }, "debug": { - "version": "4.3.1", - "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.1.tgz", - "integrity": "sha512-doEwdvm4PCeK4K3RQN2ZC2BYUBaxwLARCqZmMjtF8a51J2Rb0xpVloFRnCODwqjpwnAoao4pelN8l3RJdv3gRQ==", + "version": "4.3.2", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.2.tgz", + "integrity": "sha512-mOp8wKcvj7XxC78zLgw/ZA+6TSgkoE2C/ienthhRD298T7UNwAg9diBpLRxC0mOezLl4B0xV7M0cCO6P/O0Xhw==", "dev": true, "requires": { "ms": "2.1.2" @@ -3665,13 +3712,14 @@ "dev": true }, "eslint": { - "version": "7.29.0", - "resolved": "https://registry.npmjs.org/eslint/-/eslint-7.29.0.tgz", - "integrity": "sha512-82G/JToB9qIy/ArBzIWG9xvvwL3R86AlCjtGw+A29OMZDqhTybz/MByORSukGxeI+YPCR4coYyITKk8BFH9nDA==", + "version": "7.32.0", + "resolved": "https://registry.npmjs.org/eslint/-/eslint-7.32.0.tgz", + "integrity": 
"sha512-VHZ8gX+EDfz+97jGcgyGCyRia/dPOd6Xh9yPv8Bl1+SoaIwD+a/vlrOmGRUyOYu7MwUhc7CxqeaDZU13S4+EpA==", "dev": true, "requires": { "@babel/code-frame": "7.12.11", - "@eslint/eslintrc": "^0.4.2", + "@eslint/eslintrc": "^0.4.3", + "@humanwhocodes/config-array": "^0.5.0", "ajv": "^6.10.0", "chalk": "^4.0.0", "cross-spawn": "^7.0.2", @@ -3863,9 +3911,9 @@ "dev": true }, "fast-safe-stringify": { - "version": "2.0.7", - "resolved": "https://registry.npmjs.org/fast-safe-stringify/-/fast-safe-stringify-2.0.7.tgz", - "integrity": "sha512-Utm6CdzT+6xsDk2m8S6uL8VHxNwI6Jub+e9NYTcAms28T84pTa25GJQV9j0CY0N1rM8hK4x6grpF2BQf+2qwVA==" + "version": "2.0.8", + "resolved": "https://registry.npmjs.org/fast-safe-stringify/-/fast-safe-stringify-2.0.8.tgz", + "integrity": "sha512-lXatBjf3WPjmWD6DpIZxkeSsCOwqI0maYMpgDlx8g4U2qi4lbjA9oH/HD2a87G+KfsUmo5WbJFmqBZlPxtptag==" }, "fecha": { "version": "4.2.1", @@ -3892,9 +3940,9 @@ } }, "flatted": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/flatted/-/flatted-3.1.1.tgz", - "integrity": "sha512-zAoAQiudy+r5SvnSw3KJy5os/oRJYHzrzja/tBDqrZtNhUw8bt6y8OBzMWcjWr+8liV8Eb6yOhw8WZ7VFZ5ZzA==", + "version": "3.2.2", + "resolved": "https://registry.npmjs.org/flatted/-/flatted-3.2.2.tgz", + "integrity": "sha512-JaTY/wtrcSyvXJl4IMFHPKyFur1sE9AUqc0QnhOaJ0CxHtAoIV8pYDzeEfAaNEtGkOfq4gr3LBFmdXW5mOQFnA==", "dev": true }, "fn.name": { @@ -3964,9 +4012,9 @@ } }, "globals": { - "version": "13.9.0", - "resolved": "https://registry.npmjs.org/globals/-/globals-13.9.0.tgz", - "integrity": "sha512-74/FduwI/JaIrr1H8e71UbDE+5x7pIPs1C2rrwC52SszOo043CsWOZEMW7o2Y58xwm9b+0RBKDxY5n2sUpEFxA==", + "version": "13.10.0", + "resolved": "https://registry.npmjs.org/globals/-/globals-13.10.0.tgz", + "integrity": "sha512-piHC3blgLGFjvOuMmWZX60f+na1lXFDhQXBf1UYp2fXPXqvEUbOhNwi6BsQ0bQishwedgnjkwv1d9zKf+MWw3g==", "dev": true, "requires": { "type-fest": "^0.20.2" @@ -4549,9 +4597,9 @@ } }, "is-stream": { - "version": "2.0.0", - "resolved": 
"https://registry.npmjs.org/is-stream/-/is-stream-2.0.0.tgz", - "integrity": "sha512-XCoy+WlUr7d1+Z8GgSuXmpuUFC9fOhRXglJMx+dwLKTkL44Cjd4W1Z5P+BQZpr+cR93aGP4S/s7Ftw6Nd/kiEw==" + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-2.0.1.tgz", + "integrity": "sha512-hFoiJiTl63nn+kstHGBtewWSKnQLpyb155KHheA1l39uvtO9nWIop1p3udqPcUd/xbF1VLMO4n7OI6p7RbngDg==" }, "is-typedarray": { "version": "1.0.0", @@ -4648,6 +4696,11 @@ "type-check": "~0.4.0" } }, + "lodash": { + "version": "4.17.21", + "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.21.tgz", + "integrity": "sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==" + }, "lodash.camelcase": { "version": "4.3.0", "resolved": "https://registry.npmjs.org/lodash.camelcase/-/lodash.camelcase-4.3.0.tgz", @@ -4702,16 +4755,16 @@ } }, "mime-db": { - "version": "1.48.0", - "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.48.0.tgz", - "integrity": "sha512-FM3QwxV+TnZYQ2aRqhlKBMHxk10lTbMt3bBkMAp54ddrNeVSfcQYOOKuGuy3Ddrm38I04If834fOUSq1yzslJQ==" + "version": "1.49.0", + "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.49.0.tgz", + "integrity": "sha512-CIc8j9URtOVApSFCQIF+VBkX1RwXp/oMMOrqdyXSBXq5RWNEsRfyj1kiRnQgmNXmHxPoFIxOroKA3zcU9P+nAA==" }, "mime-types": { - "version": "2.1.31", - "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.31.tgz", - "integrity": "sha512-XGZnNzm3QvgKxa8dpzyhFTHmpP3l5YNusmne07VUOXxou9CqUqYa/HBy124RqtVh/O2pECas/MOcsDgpilPOPg==", + "version": "2.1.32", + "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.32.tgz", + "integrity": "sha512-hJGaVS4G4c9TSMYh2n6SQAGrC4RnfU+daP8G7cSCmaqNjiOoUY0VHCMS42pxnQmVF1GWwFhbHWn3RIxCqTmZ9A==", "requires": { - "mime-db": "1.48.0" + "mime-db": "1.49.0" } }, "minimatch": { @@ -4783,9 +4836,9 @@ } }, "nan": { - "version": "2.14.2", - "resolved": "https://registry.npmjs.org/nan/-/nan-2.14.2.tgz", - "integrity": 
"sha512-M2ufzIiINKCuDfBSAUr1vWQ+vuVcA9kqx8JJUsbQi6yf1uGRyb7HfpdfUr5qLXf3B/t8dPvcjhKMmlfnP47EzQ==" + "version": "2.15.0", + "resolved": "https://registry.npmjs.org/nan/-/nan-2.15.0.tgz", + "integrity": "sha512-8ZtvEnA2c5aYCZYd1cvgdnU6cqwixRoYg70xPLWUws5ORTa/lnw+u4amixRS/Ac5U5mQVgp9pnlSUnbNWFaWZQ==" }, "natural-compare": { "version": "1.4.0", @@ -5079,9 +5132,9 @@ "dev": true }, "ssh2": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/ssh2/-/ssh2-1.1.0.tgz", - "integrity": "sha512-CidQLG2ZacoT0Z7O6dOyisj4JdrOrLVJ4KbHjVNz9yI1vO08FAYQPcnkXY9BP8zeYo+J/nBgY6Gg4R7w4WFWtg==", + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/ssh2/-/ssh2-1.2.0.tgz", + "integrity": "sha512-vklfVRyylayGV/zMwVEkTC9kBhA3t264hoUHV/yGuunBJh6uBGP1VlzhOp8EsqxpKnG0xkLE1qHZlU0+t8Vh6Q==", "requires": { "asn1": "^0.2.4", "bcrypt-pbkdf": "^1.0.2", @@ -5166,9 +5219,9 @@ }, "dependencies": { "ajv": { - "version": "8.6.0", - "resolved": "https://registry.npmjs.org/ajv/-/ajv-8.6.0.tgz", - "integrity": "sha512-cnUG4NSBiM4YFBxgZIj/In3/6KX+rQ2l2YPRVcvAMQGWEPKuXoPIhxzwqh31jA3IPbI4qEOp/5ILI4ynioXsGQ==", + "version": "8.6.2", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-8.6.2.tgz", + "integrity": "sha512-9807RlWAgT564wT+DjeyU5OFMPjmzxVobvDFmNAhY+5zD6A2ly3jDp6sgnfyDtlIQ+7H97oc/DGCzzfu9rjw9w==", "dev": true, "requires": { "fast-deep-equal": "^3.1.1", @@ -5244,9 +5297,9 @@ "dev": true }, "uglify-js": { - "version": "3.13.9", - "resolved": "https://registry.npmjs.org/uglify-js/-/uglify-js-3.13.9.tgz", - "integrity": "sha512-wZbyTQ1w6Y7fHdt8sJnHfSIuWeDgk6B5rCb4E/AM6QNNPbOMIZph21PW5dRB3h7Df0GszN+t7RuUH6sWK5bF0g==", + "version": "3.14.1", + "resolved": "https://registry.npmjs.org/uglify-js/-/uglify-js-3.14.1.tgz", + "integrity": "sha512-JhS3hmcVaXlp/xSo3PKY5R0JqKs5M3IV+exdLHW99qKvKivPO4Z8qbej6mte17SOPqAOVMjt/XGgWacnFSzM3g==", "optional": true }, "uri-js": { @@ -5388,9 +5441,9 @@ "integrity": 
"sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==" }, "yargs": { - "version": "17.0.1", - "resolved": "https://registry.npmjs.org/yargs/-/yargs-17.0.1.tgz", - "integrity": "sha512-xBBulfCc8Y6gLFcrPvtqKz9hz8SO0l1Ni8GgDekvBX2ro0HRQImDGnikfc33cgzcYUSncapnNcZDjVFIH3f6KQ==", + "version": "17.1.0", + "resolved": "https://registry.npmjs.org/yargs/-/yargs-17.1.0.tgz", + "integrity": "sha512-SQr7qqmQ2sNijjJGHL4u7t8vyDZdZ3Ahkmo4sc1w5xI9TBX0QDdG/g4SFnxtWOsGLjwHQue57eFALfwFCnixgg==", "requires": { "cliui": "^7.0.2", "escalade": "^3.1.1", diff --git a/package.json b/package.json index 0027148..51ccbce 100644 --- a/package.json +++ b/package.json @@ -24,6 +24,7 @@ "grpc-uds": "^0.1.6", "handlebars": "^4.7.7", "js-yaml": "^4.0.0", + "lodash": "^4.17.21", "lru-cache": "^6.0.0", "request": "^2.88.2", "semver": "^7.3.4", diff --git a/src/driver/controller-synology/http/index.js b/src/driver/controller-synology/http/index.js index 6f45ed2..52d2928 100644 --- a/src/driver/controller-synology/http/index.js +++ b/src/driver/controller-synology/http/index.js @@ -482,8 +482,6 @@ class SynologyHttpClient { ); } - - async CreateClonedVolume(src_lun_uuid, dst_lun_name) { const create_cloned_volume = { api: "SYNO.Core.ISCSI.LUN", @@ -493,7 +491,7 @@ class SynologyHttpClient { dst_lun_name: dst_lun_name, // dst lun name is_same_pool: true, // always true? string? 
clone_type: "democratic-csi", // check - } + }; return await this.do_request("GET", "entry.cgi", create_cloned_volume); } @@ -506,11 +504,13 @@ class SynologyHttpClient { snapshot_uuid: JSON.stringify(snapshot_uuid), // shaptop uuid cloned_lun_name: cloned_lun_name, // cloned lun name clone_type: "democratic-csi", // check - } - return await this.do_request("GET", "entry.cgi", create_volume_from_snapshot); + }; + return await this.do_request( + "GET", + "entry.cgi", + create_volume_from_snapshot + ); } - - } module.exports.SynologyHttpClient = SynologyHttpClient; diff --git a/src/driver/controller-synology/index.js b/src/driver/controller-synology/index.js index 01158a9..5952e27 100644 --- a/src/driver/controller-synology/index.js +++ b/src/driver/controller-synology/index.js @@ -59,7 +59,7 @@ class ControllerSynologyDriver extends CsiBaseDriver { "GET_CAPACITY", "CREATE_DELETE_SNAPSHOT", //"LIST_SNAPSHOTS", - //"CLONE_VOLUME", + "CLONE_VOLUME", //"PUBLISH_READONLY", "EXPAND_VOLUME", ]; @@ -478,13 +478,21 @@ class ControllerSynologyDriver extends CsiBaseDriver { // therefore we continue to search for the lun after delete success call to ensure full deletion await httpClient.DeleteLun(lun_uuid); - let settleEnabled = driver.options.api.lunDelete.settleEnabled; + //let settleEnabled = driver.options.api.lunDelete.settleEnabled; + let settleEnabled = true; if (settleEnabled) { let currentCheck = 0; + + /* let settleMaxRetries = driver.options.api.lunDelete.settleMaxRetries || 6; let settleSeconds = driver.options.api.lunDelete.settleSeconds || 5; + */ + + let settleMaxRetries = 6; + let settleSeconds = 5; + let waitTimeBetweenChecks = settleSeconds * 1000; await sleep(waitTimeBetweenChecks); diff --git a/src/driver/index.js b/src/driver/index.js index fe37c43..bdf1d13 100644 --- a/src/driver/index.js +++ b/src/driver/index.js @@ -1,3 +1,4 @@ +const _ = require("lodash"); const os = require("os"); const fs = require("fs"); const { GrpcError, grpc } = 
require("../utils/grpc"); @@ -17,7 +18,23 @@ const sleep = require("../utils/general").sleep; class CsiBaseDriver { constructor(ctx, options) { this.ctx = ctx; - this.options = options; + this.options = options || {}; + + if (!this.options.hasOwnProperty("node")) { + this.options.node = {}; + } + + if (!this.options.node.hasOwnProperty("format")) { + this.options.node.format = {}; + } + + if (!this.options.node.hasOwnProperty("mount")) { + this.options.node.mount = {}; + } + + if (!this.options.node.mount.hasOwnProperty("checkFilesystem")) { + this.options.node.mount.checkFilesystem = {}; + } } /** @@ -490,7 +507,15 @@ class CsiBaseDriver { // format result = await filesystem.deviceIsFormatted(device); if (!result) { - await filesystem.formatDevice(device, fs_type); + let formatOptions = _.get( + driver.options.node.format, + [fs_type, "customOptions"], + [] + ); + if (!Array.isArray(formatOptions)) { + formatOptions = []; + } + await filesystem.formatDevice(device, fs_type, formatOptions); } let fs_info = await filesystem.getDeviceFilesystemInfo(device); @@ -502,9 +527,17 @@ class CsiBaseDriver { staging_target_path ); if (!result) { - // TODO: add a parameter to control this behavior // https://github.com/democratic-csi/democratic-csi/issues/52#issuecomment-768463401 - //await filesystem.checkFilesystem(device, fs_type); + let checkFilesystem = + driver.options.node.mount.checkFilesystem[fs_type] || {}; + if (checkFilesystem.enabled) { + await filesystem.checkFilesystem( + device, + fs_type, + checkFilesystem.customOptions || [], + checkFilesystem.customFilesystemOptions || [] + ); + } } } break; From 8540cf44deb2846503041e733518c052785a03fc Mon Sep 17 00:00:00 2001 From: Travis Glenn Hansen Date: Thu, 5 Aug 2021 23:13:38 -0600 Subject: [PATCH 26/44] stringify lun_uuid Signed-off-by: Travis Glenn Hansen --- src/driver/controller-synology/http/index.js | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/driver/controller-synology/http/index.js 
b/src/driver/controller-synology/http/index.js index 52d2928..97e5e40 100644 --- a/src/driver/controller-synology/http/index.js +++ b/src/driver/controller-synology/http/index.js @@ -399,6 +399,8 @@ class SynologyHttpClient { version: 1, }); + data.src_lun_uuid = JSON.stringify(data.src_lun_uuid); + return await this.do_request("GET", "entry.cgi", data); } From b5567bb39037fb3e09b216e4e180f96fe78d102a Mon Sep 17 00:00:00 2001 From: Travis Glenn Hansen Date: Thu, 5 Aug 2021 23:32:40 -0600 Subject: [PATCH 27/44] more uuid stringify fun Signed-off-by: Travis Glenn Hansen --- src/driver/controller-synology/http/index.js | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/driver/controller-synology/http/index.js b/src/driver/controller-synology/http/index.js index 97e5e40..bdba28f 100644 --- a/src/driver/controller-synology/http/index.js +++ b/src/driver/controller-synology/http/index.js @@ -255,7 +255,7 @@ class SynologyHttpClient { api: "SYNO.Core.ISCSI.LUN", method: "delete_snapshot", version: 1, - snapshot_uuid: snapshot_uuid, // snapshot_id + snapshot_uuid: JSON.stringify(snapshot_uuid), // snapshot_id deleted_by: "democratic_csi", // ? }; @@ -480,7 +480,7 @@ class SynologyHttpClient { return await this.do_request( "GET", "entry.cgi", - Object.assign({}, iscsi_lun_extend, { uuid: uuid, new_size: size }) + Object.assign({}, iscsi_lun_extend, { uuid: JSON.stringify(uuid), new_size: size }) ); } @@ -502,7 +502,7 @@ class SynologyHttpClient { api: "SYNO.Core.ISCSI.LUN", version: 1, method: "clone_snapshot", - src_lun_uuid: src_lun_uuid, // src lun uuid, snapshot id? + src_lun_uuid: JSON.stringify(src_lun_uuid), // src lun uuid, snapshot id? 
snapshot_uuid: JSON.stringify(snapshot_uuid), // shaptop uuid cloned_lun_name: cloned_lun_name, // cloned lun name clone_type: "democratic-csi", // check From 1315937fd865caf06a87ce7d25d011a7afaf8da2 Mon Sep 17 00:00:00 2001 From: Travis Glenn Hansen Date: Fri, 6 Aug 2021 00:03:22 -0600 Subject: [PATCH 28/44] proper await Signed-off-by: Travis Glenn Hansen --- src/driver/controller-synology/index.js | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/src/driver/controller-synology/index.js b/src/driver/controller-synology/index.js index 5952e27..c377f6b 100644 --- a/src/driver/controller-synology/index.js +++ b/src/driver/controller-synology/index.js @@ -304,13 +304,13 @@ class ControllerSynologyDriver extends CsiBaseDriver { } break; case "volume": - let srcLunName = driver.buildIscsiName( - volume_content_source.volume.volume_id - ); - src_lun_uuid = await httpClient.GetLunUUIDByName(srcLunName); - - existingLun = httpClient.GetLunByName(iscsiName); + existingLun = await httpClient.GetLunByName(iscsiName); if (!existingLun) { + let srcLunName = driver.buildIscsiName( + volume_content_source.volume.volume_id + ); + + src_lun_uuid = await httpClient.GetLunUUIDByName(srcLunName); await httpClient.CreateClonedVolume(src_lun_uuid, iscsiName); } break; From 3d462e7d09cbcdfe47251db52e6aa72b26148b4c Mon Sep 17 00:00:00 2001 From: Travis Glenn Hansen Date: Fri, 6 Aug 2021 00:35:31 -0600 Subject: [PATCH 29/44] fix modifying by reference Signed-off-by: Travis Glenn Hansen --- src/utils/mount.js | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/utils/mount.js b/src/utils/mount.js index 1b1b6ba..5ad935e 100644 --- a/src/utils/mount.js +++ b/src/utils/mount.js @@ -148,14 +148,14 @@ class Mount { async getMountDetails(path, extraOutputProperties = []) { const mount = this; let args = []; - const common_options = FINDMNT_COMMON_OPTIONS; + const common_options = JSON.parse(JSON.stringify(FINDMNT_COMMON_OPTIONS)); if 
(extraOutputProperties.length > 0) { common_options[1] = common_options[1] + "," + extraOutputProperties.join(","); } args = args.concat(["--mountpoint", path]); - args = args.concat(FINDMNT_COMMON_OPTIONS); + args = args.concat(common_options); let result; try { From 22031a84672714eb5b8370a53b0d67fe1d0bbd69 Mon Sep 17 00:00:00 2001 From: Travis Glenn Hansen Date: Tue, 10 Aug 2021 11:59:44 -0600 Subject: [PATCH 30/44] support for csi v1.4.0 and v1.5.0, various misc improvements Signed-off-by: Travis Glenn Hansen --- bin/democratic-csi | 11 +- bin/liveness-probe | 13 +- csi_proto/csi-v1.4.0.proto | 1578 ++++++++++++++++ csi_proto/csi-v1.5.0.proto | 1635 +++++++++++++++++ package-lock.json | 82 +- package.json | 1 + src/driver/controller-client-common/index.js | 29 + src/driver/controller-synology/http/index.js | 8 +- src/driver/controller-synology/index.js | 31 + src/driver/controller-zfs-ssh/index.js | 31 +- src/driver/freenas/api.js | 134 +- src/driver/index.js | 24 +- src/driver/node-manual/index.js | 32 + .../zfs-local-ephemeral-inline/index.js | 32 + 14 files changed, 3569 insertions(+), 72 deletions(-) create mode 100644 csi_proto/csi-v1.4.0.proto create mode 100644 csi_proto/csi-v1.5.0.proto diff --git a/bin/democratic-csi b/bin/democratic-csi index e479bf2..6e3e698 100755 --- a/bin/democratic-csi +++ b/bin/democratic-csi @@ -32,7 +32,16 @@ const args = require("yargs") }) .option("csi-version", { describe: "versin of the csi spec to load", - choices: ["0.2.0", "0.3.0", "1.0.0", "1.1.0", "1.2.0", "1.3.0"], + choices: [ + "0.2.0", + "0.3.0", + "1.0.0", + "1.1.0", + "1.2.0", + "1.3.0", + "1.4.0", + "1.5.0", + ], }) .demandOption(["csi-version"], "csi-version is required") .option("csi-name", { diff --git a/bin/liveness-probe b/bin/liveness-probe index 4218cd8..d8b50ce 100755 --- a/bin/liveness-probe +++ b/bin/liveness-probe @@ -10,7 +10,16 @@ const args = require("yargs") .usage("$0 [options]") .option("csi-version", { describe: "versin of the csi spec to 
load", - choices: ["0.2.0", "0.3.0", "1.0.0", "1.1.0", "1.2.0", "1.3.0"], + choices: [ + "0.2.0", + "0.3.0", + "1.0.0", + "1.1.0", + "1.2.0", + "1.3.0", + "1.4.0", + "1.5.0", + ], }) .demandOption(["csi-version"], "csi-version is required") .option("csi-address", { @@ -49,7 +58,7 @@ const clientIdentity = new csi.Identity( /** * Probe the identity service and check for ready state - * + * * https://github.com/kubernetes-csi/livenessprobe/blob/master/cmd/livenessprobe/main.go * https://github.com/kubernetes-csi/csi-lib-utils/blob/master/rpc/common.go */ diff --git a/csi_proto/csi-v1.4.0.proto b/csi_proto/csi-v1.4.0.proto new file mode 100644 index 0000000..4e9c99e --- /dev/null +++ b/csi_proto/csi-v1.4.0.proto @@ -0,0 +1,1578 @@ +// Code generated by make; DO NOT EDIT. +syntax = "proto3"; +package csi.v1; + +import "google/protobuf/descriptor.proto"; +import "google/protobuf/timestamp.proto"; +import "google/protobuf/wrappers.proto"; + +option go_package = "csi"; + +extend google.protobuf.EnumOptions { + // Indicates that this enum is OPTIONAL and part of an experimental + // API that may be deprecated and eventually removed between minor + // releases. + bool alpha_enum = 1060; +} +extend google.protobuf.EnumValueOptions { + // Indicates that this enum value is OPTIONAL and part of an + // experimental API that may be deprecated and eventually removed + // between minor releases. + bool alpha_enum_value = 1060; +} +extend google.protobuf.FieldOptions { + // Indicates that a field MAY contain information that is sensitive + // and MUST be treated as such (e.g. not logged). + bool csi_secret = 1059; + + // Indicates that this field is OPTIONAL and part of an experimental + // API that may be deprecated and eventually removed between minor + // releases. 
+ bool alpha_field = 1060; +} +extend google.protobuf.MessageOptions { + // Indicates that this message is OPTIONAL and part of an experimental + // API that may be deprecated and eventually removed between minor + // releases. + bool alpha_message = 1060; +} +extend google.protobuf.MethodOptions { + // Indicates that this method is OPTIONAL and part of an experimental + // API that may be deprecated and eventually removed between minor + // releases. + bool alpha_method = 1060; +} +extend google.protobuf.ServiceOptions { + // Indicates that this service is OPTIONAL and part of an experimental + // API that may be deprecated and eventually removed between minor + // releases. + bool alpha_service = 1060; +} +service Identity { + rpc GetPluginInfo(GetPluginInfoRequest) + returns (GetPluginInfoResponse) {} + + rpc GetPluginCapabilities(GetPluginCapabilitiesRequest) + returns (GetPluginCapabilitiesResponse) {} + + rpc Probe (ProbeRequest) + returns (ProbeResponse) {} +} + +service Controller { + rpc CreateVolume (CreateVolumeRequest) + returns (CreateVolumeResponse) {} + + rpc DeleteVolume (DeleteVolumeRequest) + returns (DeleteVolumeResponse) {} + + rpc ControllerPublishVolume (ControllerPublishVolumeRequest) + returns (ControllerPublishVolumeResponse) {} + + rpc ControllerUnpublishVolume (ControllerUnpublishVolumeRequest) + returns (ControllerUnpublishVolumeResponse) {} + + rpc ValidateVolumeCapabilities (ValidateVolumeCapabilitiesRequest) + returns (ValidateVolumeCapabilitiesResponse) {} + + rpc ListVolumes (ListVolumesRequest) + returns (ListVolumesResponse) {} + + rpc GetCapacity (GetCapacityRequest) + returns (GetCapacityResponse) {} + + rpc ControllerGetCapabilities (ControllerGetCapabilitiesRequest) + returns (ControllerGetCapabilitiesResponse) {} + + rpc CreateSnapshot (CreateSnapshotRequest) + returns (CreateSnapshotResponse) {} + + rpc DeleteSnapshot (DeleteSnapshotRequest) + returns (DeleteSnapshotResponse) {} + + rpc ListSnapshots (ListSnapshotsRequest) + 
returns (ListSnapshotsResponse) {} + + rpc ControllerExpandVolume (ControllerExpandVolumeRequest) + returns (ControllerExpandVolumeResponse) {} + + rpc ControllerGetVolume (ControllerGetVolumeRequest) + returns (ControllerGetVolumeResponse) { + option (alpha_method) = true; + } +} + +service Node { + rpc NodeStageVolume (NodeStageVolumeRequest) + returns (NodeStageVolumeResponse) {} + + rpc NodeUnstageVolume (NodeUnstageVolumeRequest) + returns (NodeUnstageVolumeResponse) {} + + rpc NodePublishVolume (NodePublishVolumeRequest) + returns (NodePublishVolumeResponse) {} + + rpc NodeUnpublishVolume (NodeUnpublishVolumeRequest) + returns (NodeUnpublishVolumeResponse) {} + + rpc NodeGetVolumeStats (NodeGetVolumeStatsRequest) + returns (NodeGetVolumeStatsResponse) {} + + + rpc NodeExpandVolume(NodeExpandVolumeRequest) + returns (NodeExpandVolumeResponse) {} + + + rpc NodeGetCapabilities (NodeGetCapabilitiesRequest) + returns (NodeGetCapabilitiesResponse) {} + + rpc NodeGetInfo (NodeGetInfoRequest) + returns (NodeGetInfoResponse) {} +} +message GetPluginInfoRequest { + // Intentionally empty. +} + +message GetPluginInfoResponse { + // The name MUST follow domain name notation format + // (https://tools.ietf.org/html/rfc1035#section-2.3.1). It SHOULD + // include the plugin's host company name and the plugin name, + // to minimize the possibility of collisions. It MUST be 63 + // characters or less, beginning and ending with an alphanumeric + // character ([a-z0-9A-Z]) with dashes (-), dots (.), and + // alphanumerics between. This field is REQUIRED. + string name = 1; + + // This field is REQUIRED. Value of this field is opaque to the CO. + string vendor_version = 2; + + // This field is OPTIONAL. Values are opaque to the CO. + map manifest = 3; +} +message GetPluginCapabilitiesRequest { + // Intentionally empty. +} + +message GetPluginCapabilitiesResponse { + // All the capabilities that the controller service supports. This + // field is OPTIONAL. 
+ repeated PluginCapability capabilities = 1; +} + +// Specifies a capability of the plugin. +message PluginCapability { + message Service { + enum Type { + UNKNOWN = 0; + // CONTROLLER_SERVICE indicates that the Plugin provides RPCs for + // the ControllerService. Plugins SHOULD provide this capability. + // In rare cases certain plugins MAY wish to omit the + // ControllerService entirely from their implementation, but such + // SHOULD NOT be the common case. + // The presence of this capability determines whether the CO will + // attempt to invoke the REQUIRED ControllerService RPCs, as well + // as specific RPCs as indicated by ControllerGetCapabilities. + CONTROLLER_SERVICE = 1; + + // VOLUME_ACCESSIBILITY_CONSTRAINTS indicates that the volumes for + // this plugin MAY NOT be equally accessible by all nodes in the + // cluster. The CO MUST use the topology information returned by + // CreateVolumeRequest along with the topology information + // returned by NodeGetInfo to ensure that a given volume is + // accessible from a given node when scheduling workloads. + VOLUME_ACCESSIBILITY_CONSTRAINTS = 2; + } + Type type = 1; + } + + message VolumeExpansion { + enum Type { + UNKNOWN = 0; + + // ONLINE indicates that volumes may be expanded when published to + // a node. When a Plugin implements this capability it MUST + // implement either the EXPAND_VOLUME controller capability or the + // EXPAND_VOLUME node capability or both. When a plugin supports + // ONLINE volume expansion and also has the EXPAND_VOLUME + // controller capability then the plugin MUST support expansion of + // volumes currently published and available on a node. When a + // plugin supports ONLINE volume expansion and also has the + // EXPAND_VOLUME node capability then the plugin MAY support + // expansion of node-published volume via NodeExpandVolume. + // + // Example 1: Given a shared filesystem volume (e.g. 
GlusterFs), + // the Plugin may set the ONLINE volume expansion capability and + // implement ControllerExpandVolume but not NodeExpandVolume. + // + // Example 2: Given a block storage volume type (e.g. EBS), the + // Plugin may set the ONLINE volume expansion capability and + // implement both ControllerExpandVolume and NodeExpandVolume. + // + // Example 3: Given a Plugin that supports volume expansion only + // upon a node, the Plugin may set the ONLINE volume + // expansion capability and implement NodeExpandVolume but not + // ControllerExpandVolume. + ONLINE = 1; + + // OFFLINE indicates that volumes currently published and + // available on a node SHALL NOT be expanded via + // ControllerExpandVolume. When a plugin supports OFFLINE volume + // expansion it MUST implement either the EXPAND_VOLUME controller + // capability or both the EXPAND_VOLUME controller capability and + // the EXPAND_VOLUME node capability. + // + // Example 1: Given a block storage volume type (e.g. Azure Disk) + // that does not support expansion of "node-attached" (i.e. + // controller-published) volumes, the Plugin may indicate + // OFFLINE volume expansion support and implement both + // ControllerExpandVolume and NodeExpandVolume. + OFFLINE = 2; + } + Type type = 1; + } + + oneof type { + // Service that the plugin supports. + Service service = 1; + VolumeExpansion volume_expansion = 2; + } +} +message ProbeRequest { + // Intentionally empty. +} + +message ProbeResponse { + // Readiness allows a plugin to report its initialization status back + // to the CO. Initialization for some plugins MAY be time consuming + // and it is important for a CO to distinguish between the following + // cases: + // + // 1) The plugin is in an unhealthy state and MAY need restarting. In + // this case a gRPC error code SHALL be returned. + // 2) The plugin is still initializing, but is otherwise perfectly + // healthy. 
In this case a successful response SHALL be returned + // with a readiness value of `false`. Calls to the plugin's + // Controller and/or Node services MAY fail due to an incomplete + // initialization state. + // 3) The plugin has finished initializing and is ready to service + // calls to its Controller and/or Node services. A successful + // response is returned with a readiness value of `true`. + // + // This field is OPTIONAL. If not present, the caller SHALL assume + // that the plugin is in a ready state and is accepting calls to its + // Controller and/or Node services (according to the plugin's reported + // capabilities). + .google.protobuf.BoolValue ready = 1; +} +message CreateVolumeRequest { + // The suggested name for the storage space. This field is REQUIRED. + // It serves two purposes: + // 1) Idempotency - This name is generated by the CO to achieve + // idempotency. The Plugin SHOULD ensure that multiple + // `CreateVolume` calls for the same name do not result in more + // than one piece of storage provisioned corresponding to that + // name. If a Plugin is unable to enforce idempotency, the CO's + // error recovery logic could result in multiple (unused) volumes + // being provisioned. + // In the case of error, the CO MUST handle the gRPC error codes + // per the recovery behavior defined in the "CreateVolume Errors" + // section below. + // The CO is responsible for cleaning up volumes it provisioned + // that it no longer needs. If the CO is uncertain whether a volume + // was provisioned or not when a `CreateVolume` call fails, the CO + // MAY call `CreateVolume` again, with the same name, to ensure the + // volume exists and to retrieve the volume's `volume_id` (unless + // otherwise prohibited by "CreateVolume Errors"). + // 2) Suggested name - Some storage systems allow callers to specify + // an identifier by which to refer to the newly provisioned + // storage. 
If a storage system supports this, it can optionally + // use this name as the identifier for the new volume. + // Any Unicode string that conforms to the length limit is allowed + // except those containing the following banned characters: + // U+0000-U+0008, U+000B, U+000C, U+000E-U+001F, U+007F-U+009F. + // (These are control characters other than commonly used whitespace.) + string name = 1; + + // This field is OPTIONAL. This allows the CO to specify the capacity + // requirement of the volume to be provisioned. If not specified, the + // Plugin MAY choose an implementation-defined capacity range. If + // specified it MUST always be honored, even when creating volumes + // from a source; which MAY force some backends to internally extend + // the volume after creating it. + CapacityRange capacity_range = 2; + + // The capabilities that the provisioned volume MUST have. SP MUST + // provision a volume that will satisfy ALL of the capabilities + // specified in this list. Otherwise SP MUST return the appropriate + // gRPC error code. + // The Plugin MUST assume that the CO MAY use the provisioned volume + // with ANY of the capabilities specified in this list. + // For example, a CO MAY specify two volume capabilities: one with + // access mode SINGLE_NODE_WRITER and another with access mode + // MULTI_NODE_READER_ONLY. In this case, the SP MUST verify that the + // provisioned volume can be used in either mode. + // This also enables the CO to do early validation: If ANY of the + // specified volume capabilities are not supported by the SP, the call + // MUST return the appropriate gRPC error code. + // This field is REQUIRED. + repeated VolumeCapability volume_capabilities = 3; + + // Plugin specific parameters passed in as opaque key-value pairs. + // This field is OPTIONAL. The Plugin is responsible for parsing and + // validating these parameters. COs will treat these as opaque. 
+ map parameters = 4; + + // Secrets required by plugin to complete volume creation request. + // This field is OPTIONAL. Refer to the `Secrets Requirements` + // section on how to use this field. + map secrets = 5 [(csi_secret) = true]; + + // If specified, the new volume will be pre-populated with data from + // this source. This field is OPTIONAL. + VolumeContentSource volume_content_source = 6; + + // Specifies where (regions, zones, racks, etc.) the provisioned + // volume MUST be accessible from. + // An SP SHALL advertise the requirements for topological + // accessibility information in documentation. COs SHALL only specify + // topological accessibility information supported by the SP. + // This field is OPTIONAL. + // This field SHALL NOT be specified unless the SP has the + // VOLUME_ACCESSIBILITY_CONSTRAINTS plugin capability. + // If this field is not specified and the SP has the + // VOLUME_ACCESSIBILITY_CONSTRAINTS plugin capability, the SP MAY + // choose where the provisioned volume is accessible from. + TopologyRequirement accessibility_requirements = 7; +} + +// Specifies what source the volume will be created from. One of the +// type fields MUST be specified. +message VolumeContentSource { + message SnapshotSource { + // Contains identity information for the existing source snapshot. + // This field is REQUIRED. Plugin is REQUIRED to support creating + // volume from snapshot if it supports the capability + // CREATE_DELETE_SNAPSHOT. + string snapshot_id = 1; + } + + message VolumeSource { + // Contains identity information for the existing source volume. + // This field is REQUIRED. Plugins reporting CLONE_VOLUME + // capability MUST support creating a volume from another volume. 
+ string volume_id = 1; + } + + oneof type { + SnapshotSource snapshot = 1; + VolumeSource volume = 2; + } +} + +message CreateVolumeResponse { + // Contains all attributes of the newly created volume that are + // relevant to the CO along with information required by the Plugin + // to uniquely identify the volume. This field is REQUIRED. + Volume volume = 1; +} + +// Specify a capability of a volume. +message VolumeCapability { + // Indicate that the volume will be accessed via the block device API. + message BlockVolume { + // Intentionally empty, for now. + } + + // Indicate that the volume will be accessed via the filesystem API. + message MountVolume { + // The filesystem type. This field is OPTIONAL. + // An empty string is equal to an unspecified field value. + string fs_type = 1; + + // The mount options that can be used for the volume. This field is + // OPTIONAL. `mount_flags` MAY contain sensitive information. + // Therefore, the CO and the Plugin MUST NOT leak this information + // to untrusted entities. The total size of this repeated field + // SHALL NOT exceed 4 KiB. + repeated string mount_flags = 2; + } + + // Specify how a volume can be accessed. + message AccessMode { + enum Mode { + UNKNOWN = 0; + + // Can only be published once as read/write on a single node, at + // any given time. + SINGLE_NODE_WRITER = 1; + + // Can only be published once as readonly on a single node, at + // any given time. + SINGLE_NODE_READER_ONLY = 2; + + // Can be published as readonly at multiple nodes simultaneously. + MULTI_NODE_READER_ONLY = 3; + + // Can be published at multiple nodes simultaneously. Only one of + // the node can be used as read/write. The rest will be readonly. + MULTI_NODE_SINGLE_WRITER = 4; + + // Can be published as read/write at multiple nodes + // simultaneously. + MULTI_NODE_MULTI_WRITER = 5; + } + + // This field is REQUIRED. + Mode mode = 1; + } + + // Specifies what API the volume will be accessed using. 
One of the + // following fields MUST be specified. + oneof access_type { + BlockVolume block = 1; + MountVolume mount = 2; + } + + // This is a REQUIRED field. + AccessMode access_mode = 3; +} + +// The capacity of the storage space in bytes. To specify an exact size, +// `required_bytes` and `limit_bytes` SHALL be set to the same value. At +// least one of the these fields MUST be specified. +message CapacityRange { + // Volume MUST be at least this big. This field is OPTIONAL. + // A value of 0 is equal to an unspecified field value. + // The value of this field MUST NOT be negative. + int64 required_bytes = 1; + + // Volume MUST not be bigger than this. This field is OPTIONAL. + // A value of 0 is equal to an unspecified field value. + // The value of this field MUST NOT be negative. + int64 limit_bytes = 2; +} + +// Information about a specific volume. +message Volume { + // The capacity of the volume in bytes. This field is OPTIONAL. If not + // set (value of 0), it indicates that the capacity of the volume is + // unknown (e.g., NFS share). + // The value of this field MUST NOT be negative. + int64 capacity_bytes = 1; + + // The identifier for this volume, generated by the plugin. + // This field is REQUIRED. + // This field MUST contain enough information to uniquely identify + // this specific volume vs all other volumes supported by this plugin. + // This field SHALL be used by the CO in subsequent calls to refer to + // this volume. + // The SP is NOT responsible for global uniqueness of volume_id across + // multiple SPs. + string volume_id = 2; + + // Opaque static properties of the volume. SP MAY use this field to + // ensure subsequent volume validation and publishing calls have + // contextual information. + // The contents of this field SHALL be opaque to a CO. + // The contents of this field SHALL NOT be mutable. + // The contents of this field SHALL be safe for the CO to cache. 
+ // The contents of this field SHOULD NOT contain sensitive + // information. + // The contents of this field SHOULD NOT be used for uniquely + // identifying a volume. The `volume_id` alone SHOULD be sufficient to + // identify the volume. + // A volume uniquely identified by `volume_id` SHALL always report the + // same volume_context. + // This field is OPTIONAL and when present MUST be passed to volume + // validation and publishing calls. + map volume_context = 3; + + // If specified, indicates that the volume is not empty and is + // pre-populated with data from the specified source. + // This field is OPTIONAL. + VolumeContentSource content_source = 4; + + // Specifies where (regions, zones, racks, etc.) the provisioned + // volume is accessible from. + // A plugin that returns this field MUST also set the + // VOLUME_ACCESSIBILITY_CONSTRAINTS plugin capability. + // An SP MAY specify multiple topologies to indicate the volume is + // accessible from multiple locations. + // COs MAY use this information along with the topology information + // returned by NodeGetInfo to ensure that a given volume is accessible + // from a given node when scheduling workloads. + // This field is OPTIONAL. If it is not specified, the CO MAY assume + // the volume is equally accessible from all nodes in the cluster and + // MAY schedule workloads referencing the volume on any available + // node. + // + // Example 1: + // accessible_topology = {"region": "R1", "zone": "Z2"} + // Indicates a volume accessible only from the "region" "R1" and the + // "zone" "Z2". + // + // Example 2: + // accessible_topology = + // {"region": "R1", "zone": "Z2"}, + // {"region": "R1", "zone": "Z3"} + // Indicates a volume accessible from both "zone" "Z2" and "zone" "Z3" + // in the "region" "R1". + repeated Topology accessible_topology = 5; +} + +message TopologyRequirement { + // Specifies the list of topologies the provisioned volume MUST be + // accessible from. + // This field is OPTIONAL. 
If TopologyRequirement is specified either + // requisite or preferred or both MUST be specified. + // + // If requisite is specified, the provisioned volume MUST be + // accessible from at least one of the requisite topologies. + // + // Given + // x = number of topologies provisioned volume is accessible from + // n = number of requisite topologies + // The CO MUST ensure n >= 1. The SP MUST ensure x >= 1 + // If x==n, then the SP MUST make the provisioned volume available to + // all topologies from the list of requisite topologies. If it is + // unable to do so, the SP MUST fail the CreateVolume call. + // For example, if a volume should be accessible from a single zone, + // and requisite = + // {"region": "R1", "zone": "Z2"} + // then the provisioned volume MUST be accessible from the "region" + // "R1" and the "zone" "Z2". + // Similarly, if a volume should be accessible from two zones, and + // requisite = + // {"region": "R1", "zone": "Z2"}, + // {"region": "R1", "zone": "Z3"} + // then the provisioned volume MUST be accessible from the "region" + // "R1" and both "zone" "Z2" and "zone" "Z3". + // + // If xn, then the SP MUST make the provisioned volume available from + // all topologies from the list of requisite topologies and MAY choose + // the remaining x-n unique topologies from the list of all possible + // topologies. If it is unable to do so, the SP MUST fail the + // CreateVolume call. + // For example, if a volume should be accessible from two zones, and + // requisite = + // {"region": "R1", "zone": "Z2"} + // then the provisioned volume MUST be accessible from the "region" + // "R1" and the "zone" "Z2" and the SP may select the second zone + // independently, e.g. "R1/Z4". + repeated Topology requisite = 1; + + // Specifies the list of topologies the CO would prefer the volume to + // be provisioned in. + // + // This field is OPTIONAL. If TopologyRequirement is specified either + // requisite or preferred or both MUST be specified. 
+ // + // An SP MUST attempt to make the provisioned volume available using + // the preferred topologies in order from first to last. + // + // If requisite is specified, all topologies in preferred list MUST + // also be present in the list of requisite topologies. + // + // If the SP is unable to to make the provisioned volume available + // from any of the preferred topologies, the SP MAY choose a topology + // from the list of requisite topologies. + // If the list of requisite topologies is not specified, then the SP + // MAY choose from the list of all possible topologies. + // If the list of requisite topologies is specified and the SP is + // unable to to make the provisioned volume available from any of the + // requisite topologies it MUST fail the CreateVolume call. + // + // Example 1: + // Given a volume should be accessible from a single zone, and + // requisite = + // {"region": "R1", "zone": "Z2"}, + // {"region": "R1", "zone": "Z3"} + // preferred = + // {"region": "R1", "zone": "Z3"} + // then the the SP SHOULD first attempt to make the provisioned volume + // available from "zone" "Z3" in the "region" "R1" and fall back to + // "zone" "Z2" in the "region" "R1" if that is not possible. + // + // Example 2: + // Given a volume should be accessible from a single zone, and + // requisite = + // {"region": "R1", "zone": "Z2"}, + // {"region": "R1", "zone": "Z3"}, + // {"region": "R1", "zone": "Z4"}, + // {"region": "R1", "zone": "Z5"} + // preferred = + // {"region": "R1", "zone": "Z4"}, + // {"region": "R1", "zone": "Z2"} + // then the the SP SHOULD first attempt to make the provisioned volume + // accessible from "zone" "Z4" in the "region" "R1" and fall back to + // "zone" "Z2" in the "region" "R1" if that is not possible. If that + // is not possible, the SP may choose between either the "zone" + // "Z3" or "Z5" in the "region" "R1". 
+ // + // Example 3: + // Given a volume should be accessible from TWO zones (because an + // opaque parameter in CreateVolumeRequest, for example, specifies + // the volume is accessible from two zones, aka synchronously + // replicated), and + // requisite = + // {"region": "R1", "zone": "Z2"}, + // {"region": "R1", "zone": "Z3"}, + // {"region": "R1", "zone": "Z4"}, + // {"region": "R1", "zone": "Z5"} + // preferred = + // {"region": "R1", "zone": "Z5"}, + // {"region": "R1", "zone": "Z3"} + // then the the SP SHOULD first attempt to make the provisioned volume + // accessible from the combination of the two "zones" "Z5" and "Z3" in + // the "region" "R1". If that's not possible, it should fall back to + // a combination of "Z5" and other possibilities from the list of + // requisite. If that's not possible, it should fall back to a + // combination of "Z3" and other possibilities from the list of + // requisite. If that's not possible, it should fall back to a + // combination of other possibilities from the list of requisite. + repeated Topology preferred = 2; +} + +// Topology is a map of topological domains to topological segments. +// A topological domain is a sub-division of a cluster, like "region", +// "zone", "rack", etc. +// A topological segment is a specific instance of a topological domain, +// like "zone3", "rack3", etc. +// For example {"com.company/zone": "Z1", "com.company/rack": "R3"} +// Valid keys have two segments: an OPTIONAL prefix and name, separated +// by a slash (/), for example: "com.company.example/zone". +// The key name segment is REQUIRED. The prefix is OPTIONAL. +// The key name MUST be 63 characters or less, begin and end with an +// alphanumeric character ([a-z0-9A-Z]), and contain only dashes (-), +// underscores (_), dots (.), or alphanumerics in between, for example +// "zone". 
+// The key prefix MUST be 63 characters or less, begin and end with a +// lower-case alphanumeric character ([a-z0-9]), contain only +// dashes (-), dots (.), or lower-case alphanumerics in between, and +// follow domain name notation format +// (https://tools.ietf.org/html/rfc1035#section-2.3.1). +// The key prefix SHOULD include the plugin's host company name and/or +// the plugin name, to minimize the possibility of collisions with keys +// from other plugins. +// If a key prefix is specified, it MUST be identical across all +// topology keys returned by the SP (across all RPCs). +// Keys MUST be case-insensitive. Meaning the keys "Zone" and "zone" +// MUST not both exist. +// Each value (topological segment) MUST contain 1 or more strings. +// Each string MUST be 63 characters or less and begin and end with an +// alphanumeric character with '-', '_', '.', or alphanumerics in +// between. +message Topology { + map segments = 1; +} +message DeleteVolumeRequest { + // The ID of the volume to be deprovisioned. + // This field is REQUIRED. + string volume_id = 1; + + // Secrets required by plugin to complete volume deletion request. + // This field is OPTIONAL. Refer to the `Secrets Requirements` + // section on how to use this field. + map secrets = 2 [(csi_secret) = true]; +} + +message DeleteVolumeResponse { + // Intentionally empty. +} +message ControllerPublishVolumeRequest { + // The ID of the volume to be used on a node. + // This field is REQUIRED. + string volume_id = 1; + + // The ID of the node. This field is REQUIRED. The CO SHALL set this + // field to match the node ID returned by `NodeGetInfo`. + string node_id = 2; + + // Volume capability describing how the CO intends to use this volume. + // SP MUST ensure the CO can use the published volume as described. + // Otherwise SP MUST return the appropriate gRPC error code. + // This is a REQUIRED field. 
+ VolumeCapability volume_capability = 3; + + // Indicates SP MUST publish the volume in readonly mode. + // CO MUST set this field to false if SP does not have the + // PUBLISH_READONLY controller capability. + // This is a REQUIRED field. + bool readonly = 4; + + // Secrets required by plugin to complete controller publish volume + // request. This field is OPTIONAL. Refer to the + // `Secrets Requirements` section on how to use this field. + map secrets = 5 [(csi_secret) = true]; + + // Volume context as returned by SP in + // CreateVolumeResponse.Volume.volume_context. + // This field is OPTIONAL and MUST match the volume_context of the + // volume identified by `volume_id`. + map volume_context = 6; +} + +message ControllerPublishVolumeResponse { + // Opaque static publish properties of the volume. SP MAY use this + // field to ensure subsequent `NodeStageVolume` or `NodePublishVolume` + // calls calls have contextual information. + // The contents of this field SHALL be opaque to a CO. + // The contents of this field SHALL NOT be mutable. + // The contents of this field SHALL be safe for the CO to cache. + // The contents of this field SHOULD NOT contain sensitive + // information. + // The contents of this field SHOULD NOT be used for uniquely + // identifying a volume. The `volume_id` alone SHOULD be sufficient to + // identify the volume. + // This field is OPTIONAL and when present MUST be passed to + // subsequent `NodeStageVolume` or `NodePublishVolume` calls + map publish_context = 1; +} +message ControllerUnpublishVolumeRequest { + // The ID of the volume. This field is REQUIRED. + string volume_id = 1; + + // The ID of the node. This field is OPTIONAL. The CO SHOULD set this + // field to match the node ID returned by `NodeGetInfo` or leave it + // unset. If the value is set, the SP MUST unpublish the volume from + // the specified node. If the value is unset, the SP MUST unpublish + // the volume from all nodes it is published to. 
+ string node_id = 2; + + // Secrets required by plugin to complete controller unpublish volume + // request. This SHOULD be the same secrets passed to the + // ControllerPublishVolume call for the specified volume. + // This field is OPTIONAL. Refer to the `Secrets Requirements` + // section on how to use this field. + map secrets = 3 [(csi_secret) = true]; +} + +message ControllerUnpublishVolumeResponse { + // Intentionally empty. +} +message ValidateVolumeCapabilitiesRequest { + // The ID of the volume to check. This field is REQUIRED. + string volume_id = 1; + + // Volume context as returned by SP in + // CreateVolumeResponse.Volume.volume_context. + // This field is OPTIONAL and MUST match the volume_context of the + // volume identified by `volume_id`. + map volume_context = 2; + + // The capabilities that the CO wants to check for the volume. This + // call SHALL return "confirmed" only if all the volume capabilities + // specified below are supported. This field is REQUIRED. + repeated VolumeCapability volume_capabilities = 3; + + // See CreateVolumeRequest.parameters. + // This field is OPTIONAL. + map parameters = 4; + + // Secrets required by plugin to complete volume validation request. + // This field is OPTIONAL. Refer to the `Secrets Requirements` + // section on how to use this field. + map secrets = 5 [(csi_secret) = true]; +} + +message ValidateVolumeCapabilitiesResponse { + message Confirmed { + // Volume context validated by the plugin. + // This field is OPTIONAL. + map volume_context = 1; + + // Volume capabilities supported by the plugin. + // This field is REQUIRED. + repeated VolumeCapability volume_capabilities = 2; + + // The volume creation parameters validated by the plugin. + // This field is OPTIONAL. + map parameters = 3; + } + + // Confirmed indicates to the CO the set of capabilities that the + // plugin has validated. This field SHALL only be set to a non-empty + // value for successful validation responses. 
+ // For successful validation responses, the CO SHALL compare the + // fields of this message to the originally requested capabilities in + // order to guard against an older plugin reporting "valid" for newer + // capability fields that it does not yet understand. + // This field is OPTIONAL. + Confirmed confirmed = 1; + + // Message to the CO if `confirmed` above is empty. This field is + // OPTIONAL. + // An empty string is equal to an unspecified field value. + string message = 2; +} +message ListVolumesRequest { + // If specified (non-zero value), the Plugin MUST NOT return more + // entries than this number in the response. If the actual number of + // entries is more than this number, the Plugin MUST set `next_token` + // in the response which can be used to get the next page of entries + // in the subsequent `ListVolumes` call. This field is OPTIONAL. If + // not specified (zero value), it means there is no restriction on the + // number of entries that can be returned. + // The value of this field MUST NOT be negative. + int32 max_entries = 1; + + // A token to specify where to start paginating. Set this field to + // `next_token` returned by a previous `ListVolumes` call to get the + // next page of entries. This field is OPTIONAL. + // An empty string is equal to an unspecified field value. + string starting_token = 2; +} + +message ListVolumesResponse { + message VolumeStatus{ + // A list of all `node_id` of nodes that the volume in this entry + // is controller published on. + // This field is OPTIONAL. If it is not specified and the SP has + // the LIST_VOLUMES_PUBLISHED_NODES controller capability, the CO + // MAY assume the volume is not controller published to any nodes. + // If the field is not specified and the SP does not have the + // LIST_VOLUMES_PUBLISHED_NODES controller capability, the CO MUST + // not interpret this field. + // published_node_ids MAY include nodes not published to or + // reported by the SP. 
The CO MUST be resilient to that. + repeated string published_node_ids = 1; + + // Information about the current condition of the volume. + // This field is OPTIONAL. + // This field MUST be specified if the + // VOLUME_CONDITION controller capability is supported. + VolumeCondition volume_condition = 2 [(alpha_field) = true]; + } + + message Entry { + // This field is REQUIRED + Volume volume = 1; + + // This field is OPTIONAL. This field MUST be specified if the + // LIST_VOLUMES_PUBLISHED_NODES controller capability is + // supported. + VolumeStatus status = 2; + } + + repeated Entry entries = 1; + + // This token allows you to get the next page of entries for + // `ListVolumes` request. If the number of entries is larger than + // `max_entries`, use the `next_token` as a value for the + // `starting_token` field in the next `ListVolumes` request. This + // field is OPTIONAL. + // An empty string is equal to an unspecified field value. + string next_token = 2; +} +message ControllerGetVolumeRequest { + option (alpha_message) = true; + + // The ID of the volume to fetch current volume information for. + // This field is REQUIRED. + string volume_id = 1; +} + +message ControllerGetVolumeResponse { + option (alpha_message) = true; + + message VolumeStatus{ + // A list of all the `node_id` of nodes that this volume is + // controller published on. + // This field is OPTIONAL. + // This field MUST be specified if the PUBLISH_UNPUBLISH_VOLUME + // controller capability is supported. + // published_node_ids MAY include nodes not published to or + // reported by the SP. The CO MUST be resilient to that. + repeated string published_node_ids = 1; + + // Information about the current condition of the volume. + // This field is OPTIONAL. + // This field MUST be specified if the + // VOLUME_CONDITION controller capability is supported. + VolumeCondition volume_condition = 2; + } + + // This field is REQUIRED + Volume volume = 1; + + // This field is REQUIRED. 
+ VolumeStatus status = 2; +} +message GetCapacityRequest { + // If specified, the Plugin SHALL report the capacity of the storage + // that can be used to provision volumes that satisfy ALL of the + // specified `volume_capabilities`. These are the same + // `volume_capabilities` the CO will use in `CreateVolumeRequest`. + // This field is OPTIONAL. + repeated VolumeCapability volume_capabilities = 1; + + // If specified, the Plugin SHALL report the capacity of the storage + // that can be used to provision volumes with the given Plugin + // specific `parameters`. These are the same `parameters` the CO will + // use in `CreateVolumeRequest`. This field is OPTIONAL. + map parameters = 2; + + // If specified, the Plugin SHALL report the capacity of the storage + // that can be used to provision volumes that in the specified + // `accessible_topology`. This is the same as the + // `accessible_topology` the CO returns in a `CreateVolumeResponse`. + // This field is OPTIONAL. This field SHALL NOT be set unless the + // plugin advertises the VOLUME_ACCESSIBILITY_CONSTRAINTS capability. + Topology accessible_topology = 3; +} + +message GetCapacityResponse { + // The available capacity, in bytes, of the storage that can be used + // to provision volumes. If `volume_capabilities` or `parameters` is + // specified in the request, the Plugin SHALL take those into + // consideration when calculating the available capacity of the + // storage. This field is REQUIRED. + // The value of this field MUST NOT be negative. + int64 available_capacity = 1; + + // The largest size that may be used in a + // CreateVolumeRequest.capacity_range.required_bytes field + // to create a volume with the same parameters as those in + // GetCapacityRequest. + // + // If `volume_capabilities` or `parameters` is + // specified in the request, the Plugin SHALL take those into + // consideration when calculating the minimum volume size of the + // storage. + // + // This field is OPTIONAL. 
MUST NOT be negative. + // The Plugin SHOULD provide a value for this field if it has + // a maximum size for individual volumes and leave it unset + // otherwise. COs MAY use it to make decision about + // where to create volumes. + google.protobuf.Int64Value maximum_volume_size = 2 + [(alpha_field) = true]; + + // The smallest size that may be used in a + // CreateVolumeRequest.capacity_range.limit_bytes field + // to create a volume with the same parameters as those in + // GetCapacityRequest. + // + // If `volume_capabilities` or `parameters` is + // specified in the request, the Plugin SHALL take those into + // consideration when calculating the maximum volume size of the + // storage. + // + // This field is OPTIONAL. MUST NOT be negative. + // The Plugin SHOULD provide a value for this field if it has + // a minimum size for individual volumes and leave it unset + // otherwise. COs MAY use it to make decision about + // where to create volumes. + google.protobuf.Int64Value minimum_volume_size = 3 + [(alpha_field) = true]; +} +message ControllerGetCapabilitiesRequest { + // Intentionally empty. +} + +message ControllerGetCapabilitiesResponse { + // All the capabilities that the controller service supports. This + // field is OPTIONAL. + repeated ControllerServiceCapability capabilities = 1; +} + +// Specifies a capability of the controller service. +message ControllerServiceCapability { + message RPC { + enum Type { + UNKNOWN = 0; + CREATE_DELETE_VOLUME = 1; + PUBLISH_UNPUBLISH_VOLUME = 2; + LIST_VOLUMES = 3; + GET_CAPACITY = 4; + // Currently the only way to consume a snapshot is to create + // a volume from it. Therefore plugins supporting + // CREATE_DELETE_SNAPSHOT MUST support creating volume from + // snapshot. + CREATE_DELETE_SNAPSHOT = 5; + LIST_SNAPSHOTS = 6; + + // Plugins supporting volume cloning at the storage level MAY + // report this capability. The source volume MUST be managed by + // the same plugin. 
Not all volume sources and parameters + // combinations MAY work. + CLONE_VOLUME = 7; + + // Indicates the SP supports ControllerPublishVolume.readonly + // field. + PUBLISH_READONLY = 8; + + // See VolumeExpansion for details. + EXPAND_VOLUME = 9; + + // Indicates the SP supports the + // ListVolumesResponse.entry.published_nodes field + LIST_VOLUMES_PUBLISHED_NODES = 10; + + // Indicates that the Controller service can report volume + // conditions. + // An SP MAY implement `VolumeCondition` in only the Controller + // Plugin, only the Node Plugin, or both. + // If `VolumeCondition` is implemented in both the Controller and + // Node Plugins, it SHALL report from different perspectives. + // If for some reason Controller and Node Plugins report + // misaligned volume conditions, CO SHALL assume the worst case + // is the truth. + // Note that, for alpha, `VolumeCondition` is intended be + // informative for humans only, not for automation. + VOLUME_CONDITION = 11 [(alpha_enum_value) = true]; + + // Indicates the SP supports the ControllerGetVolume RPC. + // This enables COs to, for example, fetch per volume + // condition after a volume is provisioned. + GET_VOLUME = 12 [(alpha_enum_value) = true]; + } + + Type type = 1; + } + + oneof type { + // RPC that the controller supports. + RPC rpc = 1; + } +} +message CreateSnapshotRequest { + // The ID of the source volume to be snapshotted. + // This field is REQUIRED. + string source_volume_id = 1; + + // The suggested name for the snapshot. This field is REQUIRED for + // idempotency. + // Any Unicode string that conforms to the length limit is allowed + // except those containing the following banned characters: + // U+0000-U+0008, U+000B, U+000C, U+000E-U+001F, U+007F-U+009F. + // (These are control characters other than commonly used whitespace.) + string name = 2; + + // Secrets required by plugin to complete snapshot creation request. + // This field is OPTIONAL. 
Refer to the `Secrets Requirements` + // section on how to use this field. + map secrets = 3 [(csi_secret) = true]; + + // Plugin specific parameters passed in as opaque key-value pairs. + // This field is OPTIONAL. The Plugin is responsible for parsing and + // validating these parameters. COs will treat these as opaque. + // Use cases for opaque parameters: + // - Specify a policy to automatically clean up the snapshot. + // - Specify an expiration date for the snapshot. + // - Specify whether the snapshot is readonly or read/write. + // - Specify if the snapshot should be replicated to some place. + // - Specify primary or secondary for replication systems that + // support snapshotting only on primary. + map parameters = 4; +} + +message CreateSnapshotResponse { + // Contains all attributes of the newly created snapshot that are + // relevant to the CO along with information required by the Plugin + // to uniquely identify the snapshot. This field is REQUIRED. + Snapshot snapshot = 1; +} + +// Information about a specific snapshot. +message Snapshot { + // This is the complete size of the snapshot in bytes. The purpose of + // this field is to give CO guidance on how much space is needed to + // create a volume from this snapshot. The size of the volume MUST NOT + // be less than the size of the source snapshot. This field is + // OPTIONAL. If this field is not set, it indicates that this size is + // unknown. The value of this field MUST NOT be negative and a size of + // zero means it is unspecified. + int64 size_bytes = 1; + + // The identifier for this snapshot, generated by the plugin. + // This field is REQUIRED. + // This field MUST contain enough information to uniquely identify + // this specific snapshot vs all other snapshots supported by this + // plugin. + // This field SHALL be used by the CO in subsequent calls to refer to + // this snapshot. + // The SP is NOT responsible for global uniqueness of snapshot_id + // across multiple SPs. 
+ string snapshot_id = 2; + + // Identity information for the source volume. Note that creating a + // snapshot from a snapshot is not supported here so the source has to + // be a volume. This field is REQUIRED. + string source_volume_id = 3; + + // Timestamp when the point-in-time snapshot is taken on the storage + // system. This field is REQUIRED. + .google.protobuf.Timestamp creation_time = 4; + + // Indicates if a snapshot is ready to use as a + // `volume_content_source` in a `CreateVolumeRequest`. The default + // value is false. This field is REQUIRED. + bool ready_to_use = 5; +} +message DeleteSnapshotRequest { + // The ID of the snapshot to be deleted. + // This field is REQUIRED. + string snapshot_id = 1; + + // Secrets required by plugin to complete snapshot deletion request. + // This field is OPTIONAL. Refer to the `Secrets Requirements` + // section on how to use this field. + map secrets = 2 [(csi_secret) = true]; +} + +message DeleteSnapshotResponse {} +// List all snapshots on the storage system regardless of how they were +// created. +message ListSnapshotsRequest { + // If specified (non-zero value), the Plugin MUST NOT return more + // entries than this number in the response. If the actual number of + // entries is more than this number, the Plugin MUST set `next_token` + // in the response which can be used to get the next page of entries + // in the subsequent `ListSnapshots` call. This field is OPTIONAL. If + // not specified (zero value), it means there is no restriction on the + // number of entries that can be returned. + // The value of this field MUST NOT be negative. + int32 max_entries = 1; + + // A token to specify where to start paginating. Set this field to + // `next_token` returned by a previous `ListSnapshots` call to get the + // next page of entries. This field is OPTIONAL. + // An empty string is equal to an unspecified field value. + string starting_token = 2; + + // Identity information for the source volume. 
This field is OPTIONAL. + // It can be used to list snapshots by volume. + string source_volume_id = 3; + + // Identity information for a specific snapshot. This field is + // OPTIONAL. It can be used to list only a specific snapshot. + // ListSnapshots will return with current snapshot information + // and will not block if the snapshot is being processed after + // it is cut. + string snapshot_id = 4; + + // Secrets required by plugin to complete ListSnapshot request. + // This field is OPTIONAL. Refer to the `Secrets Requirements` + // section on how to use this field. + map secrets = 5 [(csi_secret) = true]; +} + +message ListSnapshotsResponse { + message Entry { + Snapshot snapshot = 1; + } + + repeated Entry entries = 1; + + // This token allows you to get the next page of entries for + // `ListSnapshots` request. If the number of entries is larger than + // `max_entries`, use the `next_token` as a value for the + // `starting_token` field in the next `ListSnapshots` request. This + // field is OPTIONAL. + // An empty string is equal to an unspecified field value. + string next_token = 2; +} +message ControllerExpandVolumeRequest { + // The ID of the volume to expand. This field is REQUIRED. + string volume_id = 1; + + // This allows CO to specify the capacity requirements of the volume + // after expansion. This field is REQUIRED. + CapacityRange capacity_range = 2; + + // Secrets required by the plugin for expanding the volume. + // This field is OPTIONAL. + map secrets = 3 [(csi_secret) = true]; + + // Volume capability describing how the CO intends to use this volume. + // This allows SP to determine if volume is being used as a block + // device or mounted file system. For example - if volume is + // being used as a block device - the SP MAY set + // node_expansion_required to false in ControllerExpandVolumeResponse + // to skip invocation of NodeExpandVolume on the node by the CO. + // This is an OPTIONAL field. 
+ VolumeCapability volume_capability = 4; +} + +message ControllerExpandVolumeResponse { + // Capacity of volume after expansion. This field is REQUIRED. + int64 capacity_bytes = 1; + + // Whether node expansion is required for the volume. When true + // the CO MUST make NodeExpandVolume RPC call on the node. This field + // is REQUIRED. + bool node_expansion_required = 2; +} +message NodeStageVolumeRequest { + // The ID of the volume to publish. This field is REQUIRED. + string volume_id = 1; + + // The CO SHALL set this field to the value returned by + // `ControllerPublishVolume` if the corresponding Controller Plugin + // has `PUBLISH_UNPUBLISH_VOLUME` controller capability, and SHALL be + // left unset if the corresponding Controller Plugin does not have + // this capability. This is an OPTIONAL field. + map publish_context = 2; + + // The path to which the volume MAY be staged. It MUST be an + // absolute path in the root filesystem of the process serving this + // request, and MUST be a directory. The CO SHALL ensure that there + // is only one `staging_target_path` per volume. The CO SHALL ensure + // that the path is directory and that the process serving the + // request has `read` and `write` permission to that directory. The + // CO SHALL be responsible for creating the directory if it does not + // exist. + // This is a REQUIRED field. + // This field overrides the general CSI size limit. + // SP SHOULD support the maximum path length allowed by the operating + // system/filesystem, but, at a minimum, SP MUST accept a max path + // length of at least 128 bytes. + string staging_target_path = 3; + + // Volume capability describing how the CO intends to use this volume. + // SP MUST ensure the CO can use the staged volume as described. + // Otherwise SP MUST return the appropriate gRPC error code. + // This is a REQUIRED field. + VolumeCapability volume_capability = 4; + + // Secrets required by plugin to complete node stage volume request. 
+ // This field is OPTIONAL. Refer to the `Secrets Requirements` + // section on how to use this field. + map secrets = 5 [(csi_secret) = true]; + + // Volume context as returned by SP in + // CreateVolumeResponse.Volume.volume_context. + // This field is OPTIONAL and MUST match the volume_context of the + // volume identified by `volume_id`. + map volume_context = 6; +} + +message NodeStageVolumeResponse { + // Intentionally empty. +} +message NodeUnstageVolumeRequest { + // The ID of the volume. This field is REQUIRED. + string volume_id = 1; + + // The path at which the volume was staged. It MUST be an absolute + // path in the root filesystem of the process serving this request. + // This is a REQUIRED field. + // This field overrides the general CSI size limit. + // SP SHOULD support the maximum path length allowed by the operating + // system/filesystem, but, at a minimum, SP MUST accept a max path + // length of at least 128 bytes. + string staging_target_path = 2; +} + +message NodeUnstageVolumeResponse { + // Intentionally empty. +} +message NodePublishVolumeRequest { + // The ID of the volume to publish. This field is REQUIRED. + string volume_id = 1; + + // The CO SHALL set this field to the value returned by + // `ControllerPublishVolume` if the corresponding Controller Plugin + // has `PUBLISH_UNPUBLISH_VOLUME` controller capability, and SHALL be + // left unset if the corresponding Controller Plugin does not have + // this capability. This is an OPTIONAL field. + map publish_context = 2; + + // The path to which the volume was staged by `NodeStageVolume`. + // It MUST be an absolute path in the root filesystem of the process + // serving this request. + // It MUST be set if the Node Plugin implements the + // `STAGE_UNSTAGE_VOLUME` node capability. + // This is an OPTIONAL field. + // This field overrides the general CSI size limit. 
+ // SP SHOULD support the maximum path length allowed by the operating + // system/filesystem, but, at a minimum, SP MUST accept a max path + // length of at least 128 bytes. + string staging_target_path = 3; + + // The path to which the volume will be published. It MUST be an + // absolute path in the root filesystem of the process serving this + // request. The CO SHALL ensure uniqueness of target_path per volume. + // The CO SHALL ensure that the parent directory of this path exists + // and that the process serving the request has `read` and `write` + // permissions to that parent directory. + // For volumes with an access type of block, the SP SHALL place the + // block device at target_path. + // For volumes with an access type of mount, the SP SHALL place the + // mounted directory at target_path. + // Creation of target_path is the responsibility of the SP. + // This is a REQUIRED field. + // This field overrides the general CSI size limit. + // SP SHOULD support the maximum path length allowed by the operating + // system/filesystem, but, at a minimum, SP MUST accept a max path + // length of at least 128 bytes. + string target_path = 4; + + // Volume capability describing how the CO intends to use this volume. + // SP MUST ensure the CO can use the published volume as described. + // Otherwise SP MUST return the appropriate gRPC error code. + // This is a REQUIRED field. + VolumeCapability volume_capability = 5; + + // Indicates SP MUST publish the volume in readonly mode. + // This field is REQUIRED. + bool readonly = 6; + + // Secrets required by plugin to complete node publish volume request. + // This field is OPTIONAL. Refer to the `Secrets Requirements` + // section on how to use this field. + map secrets = 7 [(csi_secret) = true]; + + // Volume context as returned by SP in + // CreateVolumeResponse.Volume.volume_context. + // This field is OPTIONAL and MUST match the volume_context of the + // volume identified by `volume_id`. 
+ map volume_context = 8; +} + +message NodePublishVolumeResponse { + // Intentionally empty. +} +message NodeUnpublishVolumeRequest { + // The ID of the volume. This field is REQUIRED. + string volume_id = 1; + + // The path at which the volume was published. It MUST be an absolute + // path in the root filesystem of the process serving this request. + // The SP MUST delete the file or directory it created at this path. + // This is a REQUIRED field. + // This field overrides the general CSI size limit. + // SP SHOULD support the maximum path length allowed by the operating + // system/filesystem, but, at a minimum, SP MUST accept a max path + // length of at least 128 bytes. + string target_path = 2; +} + +message NodeUnpublishVolumeResponse { + // Intentionally empty. +} +message NodeGetVolumeStatsRequest { + // The ID of the volume. This field is REQUIRED. + string volume_id = 1; + + // It can be any valid path where volume was previously + // staged or published. + // It MUST be an absolute path in the root filesystem of + // the process serving this request. + // This is a REQUIRED field. + // This field overrides the general CSI size limit. + // SP SHOULD support the maximum path length allowed by the operating + // system/filesystem, but, at a minimum, SP MUST accept a max path + // length of at least 128 bytes. + string volume_path = 2; + + // The path where the volume is staged, if the plugin has the + // STAGE_UNSTAGE_VOLUME capability, otherwise empty. + // If not empty, it MUST be an absolute path in the root + // filesystem of the process serving this request. + // This field is OPTIONAL. + // This field overrides the general CSI size limit. + // SP SHOULD support the maximum path length allowed by the operating + // system/filesystem, but, at a minimum, SP MUST accept a max path + // length of at least 128 bytes. + string staging_target_path = 3; +} + +message NodeGetVolumeStatsResponse { + // This field is OPTIONAL. 
+ repeated VolumeUsage usage = 1; + // Information about the current condition of the volume. + // This field is OPTIONAL. + // This field MUST be specified if the VOLUME_CONDITION node + // capability is supported. + VolumeCondition volume_condition = 2 [(alpha_field) = true]; +} + +message VolumeUsage { + enum Unit { + UNKNOWN = 0; + BYTES = 1; + INODES = 2; + } + // The available capacity in specified Unit. This field is OPTIONAL. + // The value of this field MUST NOT be negative. + int64 available = 1; + + // The total capacity in specified Unit. This field is REQUIRED. + // The value of this field MUST NOT be negative. + int64 total = 2; + + // The used capacity in specified Unit. This field is OPTIONAL. + // The value of this field MUST NOT be negative. + int64 used = 3; + + // Units by which values are measured. This field is REQUIRED. + Unit unit = 4; +} + +// VolumeCondition represents the current condition of a volume. +message VolumeCondition { + option (alpha_message) = true; + + // Normal volumes are available for use and operating optimally. + // An abnormal volume does not meet these criteria. + // This field is REQUIRED. + bool abnormal = 1; + + // The message describing the condition of the volume. + // This field is REQUIRED. + string message = 2; +} +message NodeGetCapabilitiesRequest { + // Intentionally empty. +} + +message NodeGetCapabilitiesResponse { + // All the capabilities that the node service supports. This field + // is OPTIONAL. + repeated NodeServiceCapability capabilities = 1; +} + +// Specifies a capability of the node service. +message NodeServiceCapability { + message RPC { + enum Type { + UNKNOWN = 0; + STAGE_UNSTAGE_VOLUME = 1; + // If Plugin implements GET_VOLUME_STATS capability + // then it MUST implement NodeGetVolumeStats RPC + // call for fetching volume statistics. + GET_VOLUME_STATS = 2; + // See VolumeExpansion for details. + EXPAND_VOLUME = 3; + // Indicates that the Node service can report volume conditions. 
+ // An SP MAY implement `VolumeCondition` in only the Node + // Plugin, only the Controller Plugin, or both. + // If `VolumeCondition` is implemented in both the Node and + // Controller Plugins, it SHALL report from different + // perspectives. + // If for some reason Node and Controller Plugins report + // misaligned volume conditions, CO SHALL assume the worst case + // is the truth. + // Note that, for alpha, `VolumeCondition` is intended to be + // informative for humans only, not for automation. + VOLUME_CONDITION = 4 [(alpha_enum_value) = true]; + } + + Type type = 1; + } + + oneof type { + // RPC that the controller supports. + RPC rpc = 1; + } +} +message NodeGetInfoRequest { +} + +message NodeGetInfoResponse { + // The identifier of the node as understood by the SP. + // This field is REQUIRED. + // This field MUST contain enough information to uniquely identify + // this specific node vs all other nodes supported by this plugin. + // This field SHALL be used by the CO in subsequent calls, including + // `ControllerPublishVolume`, to refer to this node. + // The SP is NOT responsible for global uniqueness of node_id across + // multiple SPs. + // This field overrides the general CSI size limit. + // The size of this field SHALL NOT exceed 192 bytes. The general + // CSI size limit, 128 byte, is RECOMMENDED for best backwards + // compatibility. + string node_id = 1; + + // Maximum number of volumes that controller can publish to the node. + // If value is not set or zero CO SHALL decide how many volumes of + // this type can be published by the controller to the node. The + // plugin MUST NOT set negative values here. + // This field is OPTIONAL. + int64 max_volumes_per_node = 2; + + // Specifies where (regions, zones, racks, etc.) the node is + // accessible from. + // A plugin that returns this field MUST also set the + // VOLUME_ACCESSIBILITY_CONSTRAINTS plugin capability. 
+ // COs MAY use this information along with the topology information + // returned in CreateVolumeResponse to ensure that a given volume is + // accessible from a given node when scheduling workloads. + // This field is OPTIONAL. If it is not specified, the CO MAY assume + // the node is not subject to any topological constraint, and MAY + // schedule workloads that reference any volume V, such that there are + // no topological constraints declared for V. + // + // Example 1: + // accessible_topology = + // {"region": "R1", "zone": "Z2"} + // Indicates the node exists within the "region" "R1" and the "zone" + // "Z2". + Topology accessible_topology = 3; +} +message NodeExpandVolumeRequest { + // The ID of the volume. This field is REQUIRED. + string volume_id = 1; + + // The path on which volume is available. This field is REQUIRED. + // This field overrides the general CSI size limit. + // SP SHOULD support the maximum path length allowed by the operating + // system/filesystem, but, at a minimum, SP MUST accept a max path + // length of at least 128 bytes. + string volume_path = 2; + + // This allows CO to specify the capacity requirements of the volume + // after expansion. If capacity_range is omitted then a plugin MAY + // inspect the file system of the volume to determine the maximum + // capacity to which the volume can be expanded. In such cases a + // plugin MAY expand the volume to its maximum capacity. + // This field is OPTIONAL. + CapacityRange capacity_range = 3; + + // The path where the volume is staged, if the plugin has the + // STAGE_UNSTAGE_VOLUME capability, otherwise empty. + // If not empty, it MUST be an absolute path in the root + // filesystem of the process serving this request. + // This field is OPTIONAL. + // This field overrides the general CSI size limit. + // SP SHOULD support the maximum path length allowed by the operating + // system/filesystem, but, at a minimum, SP MUST accept a max path + // length of at least 128 bytes. 
+ string staging_target_path = 4; + + // Volume capability describing how the CO intends to use this volume. + // This allows SP to determine if volume is being used as a block + // device or mounted file system. For example - if volume is being + // used as a block device the SP MAY choose to skip expanding the + // filesystem in NodeExpandVolume implementation but still perform + // rest of the housekeeping needed for expanding the volume. If + // volume_capability is omitted the SP MAY determine + // access_type from given volume_path for the volume and perform + // node expansion. This is an OPTIONAL field. + VolumeCapability volume_capability = 5; +} + +message NodeExpandVolumeResponse { + // The capacity of the volume in bytes. This field is OPTIONAL. + int64 capacity_bytes = 1; +} diff --git a/csi_proto/csi-v1.5.0.proto b/csi_proto/csi-v1.5.0.proto new file mode 100644 index 0000000..f2ba8b9 --- /dev/null +++ b/csi_proto/csi-v1.5.0.proto @@ -0,0 +1,1635 @@ +// Code generated by make; DO NOT EDIT. +syntax = "proto3"; +package csi.v1; + +import "google/protobuf/descriptor.proto"; +import "google/protobuf/timestamp.proto"; +import "google/protobuf/wrappers.proto"; + +option go_package = "csi"; + +extend google.protobuf.EnumOptions { + // Indicates that this enum is OPTIONAL and part of an experimental + // API that may be deprecated and eventually removed between minor + // releases. + bool alpha_enum = 1060; +} +extend google.protobuf.EnumValueOptions { + // Indicates that this enum value is OPTIONAL and part of an + // experimental API that may be deprecated and eventually removed + // between minor releases. + bool alpha_enum_value = 1060; +} +extend google.protobuf.FieldOptions { + // Indicates that a field MAY contain information that is sensitive + // and MUST be treated as such (e.g. not logged). 
+ bool csi_secret = 1059; + + // Indicates that this field is OPTIONAL and part of an experimental + // API that may be deprecated and eventually removed between minor + // releases. + bool alpha_field = 1060; +} +extend google.protobuf.MessageOptions { + // Indicates that this message is OPTIONAL and part of an experimental + // API that may be deprecated and eventually removed between minor + // releases. + bool alpha_message = 1060; +} +extend google.protobuf.MethodOptions { + // Indicates that this method is OPTIONAL and part of an experimental + // API that may be deprecated and eventually removed between minor + // releases. + bool alpha_method = 1060; +} +extend google.protobuf.ServiceOptions { + // Indicates that this service is OPTIONAL and part of an experimental + // API that may be deprecated and eventually removed between minor + // releases. + bool alpha_service = 1060; +} +service Identity { + rpc GetPluginInfo(GetPluginInfoRequest) + returns (GetPluginInfoResponse) {} + + rpc GetPluginCapabilities(GetPluginCapabilitiesRequest) + returns (GetPluginCapabilitiesResponse) {} + + rpc Probe (ProbeRequest) + returns (ProbeResponse) {} +} + +service Controller { + rpc CreateVolume (CreateVolumeRequest) + returns (CreateVolumeResponse) {} + + rpc DeleteVolume (DeleteVolumeRequest) + returns (DeleteVolumeResponse) {} + + rpc ControllerPublishVolume (ControllerPublishVolumeRequest) + returns (ControllerPublishVolumeResponse) {} + + rpc ControllerUnpublishVolume (ControllerUnpublishVolumeRequest) + returns (ControllerUnpublishVolumeResponse) {} + + rpc ValidateVolumeCapabilities (ValidateVolumeCapabilitiesRequest) + returns (ValidateVolumeCapabilitiesResponse) {} + + rpc ListVolumes (ListVolumesRequest) + returns (ListVolumesResponse) {} + + rpc GetCapacity (GetCapacityRequest) + returns (GetCapacityResponse) {} + + rpc ControllerGetCapabilities (ControllerGetCapabilitiesRequest) + returns (ControllerGetCapabilitiesResponse) {} + + rpc CreateSnapshot 
(CreateSnapshotRequest) + returns (CreateSnapshotResponse) {} + + rpc DeleteSnapshot (DeleteSnapshotRequest) + returns (DeleteSnapshotResponse) {} + + rpc ListSnapshots (ListSnapshotsRequest) + returns (ListSnapshotsResponse) {} + + rpc ControllerExpandVolume (ControllerExpandVolumeRequest) + returns (ControllerExpandVolumeResponse) {} + + rpc ControllerGetVolume (ControllerGetVolumeRequest) + returns (ControllerGetVolumeResponse) { + option (alpha_method) = true; + } +} + +service Node { + rpc NodeStageVolume (NodeStageVolumeRequest) + returns (NodeStageVolumeResponse) {} + + rpc NodeUnstageVolume (NodeUnstageVolumeRequest) + returns (NodeUnstageVolumeResponse) {} + + rpc NodePublishVolume (NodePublishVolumeRequest) + returns (NodePublishVolumeResponse) {} + + rpc NodeUnpublishVolume (NodeUnpublishVolumeRequest) + returns (NodeUnpublishVolumeResponse) {} + + rpc NodeGetVolumeStats (NodeGetVolumeStatsRequest) + returns (NodeGetVolumeStatsResponse) {} + + + rpc NodeExpandVolume(NodeExpandVolumeRequest) + returns (NodeExpandVolumeResponse) {} + + + rpc NodeGetCapabilities (NodeGetCapabilitiesRequest) + returns (NodeGetCapabilitiesResponse) {} + + rpc NodeGetInfo (NodeGetInfoRequest) + returns (NodeGetInfoResponse) {} +} +message GetPluginInfoRequest { + // Intentionally empty. +} + +message GetPluginInfoResponse { + // The name MUST follow domain name notation format + // (https://tools.ietf.org/html/rfc1035#section-2.3.1). It SHOULD + // include the plugin's host company name and the plugin name, + // to minimize the possibility of collisions. It MUST be 63 + // characters or less, beginning and ending with an alphanumeric + // character ([a-z0-9A-Z]) with dashes (-), dots (.), and + // alphanumerics between. This field is REQUIRED. + string name = 1; + + // This field is REQUIRED. Value of this field is opaque to the CO. + string vendor_version = 2; + + // This field is OPTIONAL. Values are opaque to the CO. 
+ map<string, string> manifest = 3; +} +message GetPluginCapabilitiesRequest { + // Intentionally empty. +} + +message GetPluginCapabilitiesResponse { + // All the capabilities that the controller service supports. This + // field is OPTIONAL. + repeated PluginCapability capabilities = 1; +} + +// Specifies a capability of the plugin. +message PluginCapability { + message Service { + enum Type { + UNKNOWN = 0; + // CONTROLLER_SERVICE indicates that the Plugin provides RPCs for + // the ControllerService. Plugins SHOULD provide this capability. + // In rare cases certain plugins MAY wish to omit the + // ControllerService entirely from their implementation, but such + // SHOULD NOT be the common case. + // The presence of this capability determines whether the CO will + // attempt to invoke the REQUIRED ControllerService RPCs, as well + // as specific RPCs as indicated by ControllerGetCapabilities. + CONTROLLER_SERVICE = 1; + + // VOLUME_ACCESSIBILITY_CONSTRAINTS indicates that the volumes for + // this plugin MAY NOT be equally accessible by all nodes in the + // cluster. The CO MUST use the topology information returned by + // CreateVolumeRequest along with the topology information + // returned by NodeGetInfo to ensure that a given volume is + // accessible from a given node when scheduling workloads. + VOLUME_ACCESSIBILITY_CONSTRAINTS = 2; + } + Type type = 1; + } + + message VolumeExpansion { + enum Type { + UNKNOWN = 0; + + // ONLINE indicates that volumes may be expanded when published to + // a node. When a Plugin implements this capability it MUST + // implement either the EXPAND_VOLUME controller capability or the + // EXPAND_VOLUME node capability or both. When a plugin supports + // ONLINE volume expansion and also has the EXPAND_VOLUME + // controller capability then the plugin MUST support expansion of + // volumes currently published and available on a node.
When a + // plugin supports ONLINE volume expansion and also has the + // EXPAND_VOLUME node capability then the plugin MAY support + // expansion of node-published volume via NodeExpandVolume. + // + // Example 1: Given a shared filesystem volume (e.g. GlusterFs), + // the Plugin may set the ONLINE volume expansion capability and + // implement ControllerExpandVolume but not NodeExpandVolume. + // + // Example 2: Given a block storage volume type (e.g. EBS), the + // Plugin may set the ONLINE volume expansion capability and + // implement both ControllerExpandVolume and NodeExpandVolume. + // + // Example 3: Given a Plugin that supports volume expansion only + // upon a node, the Plugin may set the ONLINE volume + // expansion capability and implement NodeExpandVolume but not + // ControllerExpandVolume. + ONLINE = 1; + + // OFFLINE indicates that volumes currently published and + // available on a node SHALL NOT be expanded via + // ControllerExpandVolume. When a plugin supports OFFLINE volume + // expansion it MUST implement either the EXPAND_VOLUME controller + // capability or both the EXPAND_VOLUME controller capability and + // the EXPAND_VOLUME node capability. + // + // Example 1: Given a block storage volume type (e.g. Azure Disk) + // that does not support expansion of "node-attached" (i.e. + // controller-published) volumes, the Plugin may indicate + // OFFLINE volume expansion support and implement both + // ControllerExpandVolume and NodeExpandVolume. + OFFLINE = 2; + } + Type type = 1; + } + + oneof type { + // Service that the plugin supports. + Service service = 1; + VolumeExpansion volume_expansion = 2; + } +} +message ProbeRequest { + // Intentionally empty. +} + +message ProbeResponse { + // Readiness allows a plugin to report its initialization status back + // to the CO. 
Initialization for some plugins MAY be time consuming + // and it is important for a CO to distinguish between the following + // cases: + // + // 1) The plugin is in an unhealthy state and MAY need restarting. In + // this case a gRPC error code SHALL be returned. + // 2) The plugin is still initializing, but is otherwise perfectly + // healthy. In this case a successful response SHALL be returned + // with a readiness value of `false`. Calls to the plugin's + // Controller and/or Node services MAY fail due to an incomplete + // initialization state. + // 3) The plugin has finished initializing and is ready to service + // calls to its Controller and/or Node services. A successful + // response is returned with a readiness value of `true`. + // + // This field is OPTIONAL. If not present, the caller SHALL assume + // that the plugin is in a ready state and is accepting calls to its + // Controller and/or Node services (according to the plugin's reported + // capabilities). + .google.protobuf.BoolValue ready = 1; +} +message CreateVolumeRequest { + // The suggested name for the storage space. This field is REQUIRED. + // It serves two purposes: + // 1) Idempotency - This name is generated by the CO to achieve + // idempotency. The Plugin SHOULD ensure that multiple + // `CreateVolume` calls for the same name do not result in more + // than one piece of storage provisioned corresponding to that + // name. If a Plugin is unable to enforce idempotency, the CO's + // error recovery logic could result in multiple (unused) volumes + // being provisioned. + // In the case of error, the CO MUST handle the gRPC error codes + // per the recovery behavior defined in the "CreateVolume Errors" + // section below. + // The CO is responsible for cleaning up volumes it provisioned + // that it no longer needs. 
If the CO is uncertain whether a volume + // was provisioned or not when a `CreateVolume` call fails, the CO + // MAY call `CreateVolume` again, with the same name, to ensure the + // volume exists and to retrieve the volume's `volume_id` (unless + // otherwise prohibited by "CreateVolume Errors"). + // 2) Suggested name - Some storage systems allow callers to specify + // an identifier by which to refer to the newly provisioned + // storage. If a storage system supports this, it can optionally + // use this name as the identifier for the new volume. + // Any Unicode string that conforms to the length limit is allowed + // except those containing the following banned characters: + // U+0000-U+0008, U+000B, U+000C, U+000E-U+001F, U+007F-U+009F. + // (These are control characters other than commonly used whitespace.) + string name = 1; + + // This field is OPTIONAL. This allows the CO to specify the capacity + // requirement of the volume to be provisioned. If not specified, the + // Plugin MAY choose an implementation-defined capacity range. If + // specified it MUST always be honored, even when creating volumes + // from a source; which MAY force some backends to internally extend + // the volume after creating it. + CapacityRange capacity_range = 2; + + // The capabilities that the provisioned volume MUST have. SP MUST + // provision a volume that will satisfy ALL of the capabilities + // specified in this list. Otherwise SP MUST return the appropriate + // gRPC error code. + // The Plugin MUST assume that the CO MAY use the provisioned volume + // with ANY of the capabilities specified in this list. + // For example, a CO MAY specify two volume capabilities: one with + // access mode SINGLE_NODE_WRITER and another with access mode + // MULTI_NODE_READER_ONLY. In this case, the SP MUST verify that the + // provisioned volume can be used in either mode. 
+ // This also enables the CO to do early validation: If ANY of the + // specified volume capabilities are not supported by the SP, the call + // MUST return the appropriate gRPC error code. + // This field is REQUIRED. + repeated VolumeCapability volume_capabilities = 3; + + // Plugin specific parameters passed in as opaque key-value pairs. + // This field is OPTIONAL. The Plugin is responsible for parsing and + // validating these parameters. COs will treat these as opaque. + map<string, string> parameters = 4; + + // Secrets required by plugin to complete volume creation request. + // This field is OPTIONAL. Refer to the `Secrets Requirements` + // section on how to use this field. + map<string, string> secrets = 5 [(csi_secret) = true]; + + // If specified, the new volume will be pre-populated with data from + // this source. This field is OPTIONAL. + VolumeContentSource volume_content_source = 6; + + // Specifies where (regions, zones, racks, etc.) the provisioned + // volume MUST be accessible from. + // An SP SHALL advertise the requirements for topological + // accessibility information in documentation. COs SHALL only specify + // topological accessibility information supported by the SP. + // This field is OPTIONAL. + // This field SHALL NOT be specified unless the SP has the + // VOLUME_ACCESSIBILITY_CONSTRAINTS plugin capability. + // If this field is not specified and the SP has the + // VOLUME_ACCESSIBILITY_CONSTRAINTS plugin capability, the SP MAY + // choose where the provisioned volume is accessible from. + TopologyRequirement accessibility_requirements = 7; +} + +// Specifies what source the volume will be created from. One of the +// type fields MUST be specified. +message VolumeContentSource { + message SnapshotSource { + // Contains identity information for the existing source snapshot. + // This field is REQUIRED. Plugin is REQUIRED to support creating + // volume from snapshot if it supports the capability + // CREATE_DELETE_SNAPSHOT.
+ string snapshot_id = 1; + } + + message VolumeSource { + // Contains identity information for the existing source volume. + // This field is REQUIRED. Plugins reporting CLONE_VOLUME + // capability MUST support creating a volume from another volume. + string volume_id = 1; + } + + oneof type { + SnapshotSource snapshot = 1; + VolumeSource volume = 2; + } +} + +message CreateVolumeResponse { + // Contains all attributes of the newly created volume that are + // relevant to the CO along with information required by the Plugin + // to uniquely identify the volume. This field is REQUIRED. + Volume volume = 1; +} + +// Specify a capability of a volume. +message VolumeCapability { + // Indicate that the volume will be accessed via the block device API. + message BlockVolume { + // Intentionally empty, for now. + } + + // Indicate that the volume will be accessed via the filesystem API. + message MountVolume { + // The filesystem type. This field is OPTIONAL. + // An empty string is equal to an unspecified field value. + string fs_type = 1; + + // The mount options that can be used for the volume. This field is + // OPTIONAL. `mount_flags` MAY contain sensitive information. + // Therefore, the CO and the Plugin MUST NOT leak this information + // to untrusted entities. The total size of this repeated field + // SHALL NOT exceed 4 KiB. + repeated string mount_flags = 2; + + // If SP has VOLUME_MOUNT_GROUP node capability and CO provides + // this field then SP MUST ensure that the volume_mount_group + // parameter is passed as the group identifier to the underlying + // operating system mount system call, with the understanding + // that the set of available mount call parameters and/or + // mount implementations may vary across operating systems. 
+ // Additionally, new file and/or directory entries written to + // the underlying filesystem SHOULD be permission-labeled in such a + // manner, unless otherwise modified by a workload, that they are + // both readable and writable by said mount group identifier. + // This is an OPTIONAL field. + string volume_mount_group = 3 [(alpha_field) = true]; + } + + // Specify how a volume can be accessed. + message AccessMode { + enum Mode { + UNKNOWN = 0; + + // Can only be published once as read/write on a single node, at + // any given time. + SINGLE_NODE_WRITER = 1; + + // Can only be published once as readonly on a single node, at + // any given time. + SINGLE_NODE_READER_ONLY = 2; + + // Can be published as readonly at multiple nodes simultaneously. + MULTI_NODE_READER_ONLY = 3; + + // Can be published at multiple nodes simultaneously. Only one of + // the node can be used as read/write. The rest will be readonly. + MULTI_NODE_SINGLE_WRITER = 4; + + // Can be published as read/write at multiple nodes + // simultaneously. + MULTI_NODE_MULTI_WRITER = 5; + + // Can only be published once as read/write at a single workload + // on a single node, at any given time. SHOULD be used instead of + // SINGLE_NODE_WRITER for COs using the experimental + // SINGLE_NODE_MULTI_WRITER capability. + SINGLE_NODE_SINGLE_WRITER = 6 [(alpha_enum_value) = true]; + + // Can be published as read/write at multiple workloads on a + // single node simultaneously. SHOULD be used instead of + // SINGLE_NODE_WRITER for COs using the experimental + // SINGLE_NODE_MULTI_WRITER capability. + SINGLE_NODE_MULTI_WRITER = 7 [(alpha_enum_value) = true]; + } + + // This field is REQUIRED. + Mode mode = 1; + } + + // Specifies what API the volume will be accessed using. One of the + // following fields MUST be specified. + oneof access_type { + BlockVolume block = 1; + MountVolume mount = 2; + } + + // This is a REQUIRED field. 
+ AccessMode access_mode = 3; +} + +// The capacity of the storage space in bytes. To specify an exact size, +// `required_bytes` and `limit_bytes` SHALL be set to the same value. At +// least one of the these fields MUST be specified. +message CapacityRange { + // Volume MUST be at least this big. This field is OPTIONAL. + // A value of 0 is equal to an unspecified field value. + // The value of this field MUST NOT be negative. + int64 required_bytes = 1; + + // Volume MUST not be bigger than this. This field is OPTIONAL. + // A value of 0 is equal to an unspecified field value. + // The value of this field MUST NOT be negative. + int64 limit_bytes = 2; +} + +// Information about a specific volume. +message Volume { + // The capacity of the volume in bytes. This field is OPTIONAL. If not + // set (value of 0), it indicates that the capacity of the volume is + // unknown (e.g., NFS share). + // The value of this field MUST NOT be negative. + int64 capacity_bytes = 1; + + // The identifier for this volume, generated by the plugin. + // This field is REQUIRED. + // This field MUST contain enough information to uniquely identify + // this specific volume vs all other volumes supported by this plugin. + // This field SHALL be used by the CO in subsequent calls to refer to + // this volume. + // The SP is NOT responsible for global uniqueness of volume_id across + // multiple SPs. + string volume_id = 2; + + // Opaque static properties of the volume. SP MAY use this field to + // ensure subsequent volume validation and publishing calls have + // contextual information. + // The contents of this field SHALL be opaque to a CO. + // The contents of this field SHALL NOT be mutable. + // The contents of this field SHALL be safe for the CO to cache. + // The contents of this field SHOULD NOT contain sensitive + // information. + // The contents of this field SHOULD NOT be used for uniquely + // identifying a volume. 
The `volume_id` alone SHOULD be sufficient to + // identify the volume. + // A volume uniquely identified by `volume_id` SHALL always report the + // same volume_context. + // This field is OPTIONAL and when present MUST be passed to volume + // validation and publishing calls. + map<string, string> volume_context = 3; + + // If specified, indicates that the volume is not empty and is + // pre-populated with data from the specified source. + // This field is OPTIONAL. + VolumeContentSource content_source = 4; + + // Specifies where (regions, zones, racks, etc.) the provisioned + // volume is accessible from. + // A plugin that returns this field MUST also set the + // VOLUME_ACCESSIBILITY_CONSTRAINTS plugin capability. + // An SP MAY specify multiple topologies to indicate the volume is + // accessible from multiple locations. + // COs MAY use this information along with the topology information + // returned by NodeGetInfo to ensure that a given volume is accessible + // from a given node when scheduling workloads. + // This field is OPTIONAL. If it is not specified, the CO MAY assume + // the volume is equally accessible from all nodes in the cluster and + // MAY schedule workloads referencing the volume on any available + // node. + // + // Example 1: + // accessible_topology = {"region": "R1", "zone": "Z2"} + // Indicates a volume accessible only from the "region" "R1" and the + // "zone" "Z2". + // + // Example 2: + // accessible_topology = + // {"region": "R1", "zone": "Z2"}, + // {"region": "R1", "zone": "Z3"} + // Indicates a volume accessible from both "zone" "Z2" and "zone" "Z3" + // in the "region" "R1". + repeated Topology accessible_topology = 5; +} + +message TopologyRequirement { + // Specifies the list of topologies the provisioned volume MUST be + // accessible from. + // This field is OPTIONAL. If TopologyRequirement is specified either + // requisite or preferred or both MUST be specified.
+ // + // If requisite is specified, the provisioned volume MUST be + // accessible from at least one of the requisite topologies. + // + // Given + // x = number of topologies provisioned volume is accessible from + // n = number of requisite topologies + // The CO MUST ensure n >= 1. The SP MUST ensure x >= 1 + // If x==n, then the SP MUST make the provisioned volume available to + // all topologies from the list of requisite topologies. If it is + // unable to do so, the SP MUST fail the CreateVolume call. + // For example, if a volume should be accessible from a single zone, + // and requisite = + // {"region": "R1", "zone": "Z2"} + // then the provisioned volume MUST be accessible from the "region" + // "R1" and the "zone" "Z2". + // Similarly, if a volume should be accessible from two zones, and + // requisite = + // {"region": "R1", "zone": "Z2"}, + // {"region": "R1", "zone": "Z3"} + // then the provisioned volume MUST be accessible from the "region" + // "R1" and both "zone" "Z2" and "zone" "Z3". + // + // If x>n, then the SP MUST make the provisioned volume available from + // all topologies from the list of requisite topologies and MAY choose + // the remaining x-n unique topologies from the list of all possible + // topologies. If it is unable to do so, the SP MUST fail the + // CreateVolume call. + // For example, if a volume should be accessible from two zones, and + // requisite = + // {"region": "R1", "zone": "Z2"} + // then the provisioned volume MUST be accessible from the "region" + // "R1" and the "zone" "Z2" and the SP may select the second zone + // independently, e.g. "R1/Z4". + repeated Topology requisite = 1; + + // Specifies the list of topologies the CO would prefer the volume to + // be provisioned in. + // + // This field is OPTIONAL. If TopologyRequirement is specified either + // requisite or preferred or both MUST be specified.
+ // + // An SP MUST attempt to make the provisioned volume available using + // the preferred topologies in order from first to last. + // + // If requisite is specified, all topologies in preferred list MUST + // also be present in the list of requisite topologies. + // + // If the SP is unable to to make the provisioned volume available + // from any of the preferred topologies, the SP MAY choose a topology + // from the list of requisite topologies. + // If the list of requisite topologies is not specified, then the SP + // MAY choose from the list of all possible topologies. + // If the list of requisite topologies is specified and the SP is + // unable to to make the provisioned volume available from any of the + // requisite topologies it MUST fail the CreateVolume call. + // + // Example 1: + // Given a volume should be accessible from a single zone, and + // requisite = + // {"region": "R1", "zone": "Z2"}, + // {"region": "R1", "zone": "Z3"} + // preferred = + // {"region": "R1", "zone": "Z3"} + // then the the SP SHOULD first attempt to make the provisioned volume + // available from "zone" "Z3" in the "region" "R1" and fall back to + // "zone" "Z2" in the "region" "R1" if that is not possible. + // + // Example 2: + // Given a volume should be accessible from a single zone, and + // requisite = + // {"region": "R1", "zone": "Z2"}, + // {"region": "R1", "zone": "Z3"}, + // {"region": "R1", "zone": "Z4"}, + // {"region": "R1", "zone": "Z5"} + // preferred = + // {"region": "R1", "zone": "Z4"}, + // {"region": "R1", "zone": "Z2"} + // then the the SP SHOULD first attempt to make the provisioned volume + // accessible from "zone" "Z4" in the "region" "R1" and fall back to + // "zone" "Z2" in the "region" "R1" if that is not possible. If that + // is not possible, the SP may choose between either the "zone" + // "Z3" or "Z5" in the "region" "R1". 
+ // + // Example 3: + // Given a volume should be accessible from TWO zones (because an + // opaque parameter in CreateVolumeRequest, for example, specifies + // the volume is accessible from two zones, aka synchronously + // replicated), and + // requisite = + // {"region": "R1", "zone": "Z2"}, + // {"region": "R1", "zone": "Z3"}, + // {"region": "R1", "zone": "Z4"}, + // {"region": "R1", "zone": "Z5"} + // preferred = + // {"region": "R1", "zone": "Z5"}, + // {"region": "R1", "zone": "Z3"} + // then the the SP SHOULD first attempt to make the provisioned volume + // accessible from the combination of the two "zones" "Z5" and "Z3" in + // the "region" "R1". If that's not possible, it should fall back to + // a combination of "Z5" and other possibilities from the list of + // requisite. If that's not possible, it should fall back to a + // combination of "Z3" and other possibilities from the list of + // requisite. If that's not possible, it should fall back to a + // combination of other possibilities from the list of requisite. + repeated Topology preferred = 2; +} + +// Topology is a map of topological domains to topological segments. +// A topological domain is a sub-division of a cluster, like "region", +// "zone", "rack", etc. +// A topological segment is a specific instance of a topological domain, +// like "zone3", "rack3", etc. +// For example {"com.company/zone": "Z1", "com.company/rack": "R3"} +// Valid keys have two segments: an OPTIONAL prefix and name, separated +// by a slash (/), for example: "com.company.example/zone". +// The key name segment is REQUIRED. The prefix is OPTIONAL. +// The key name MUST be 63 characters or less, begin and end with an +// alphanumeric character ([a-z0-9A-Z]), and contain only dashes (-), +// underscores (_), dots (.), or alphanumerics in between, for example +// "zone". 
+// The key prefix MUST be 63 characters or less, begin and end with a +// lower-case alphanumeric character ([a-z0-9]), contain only +// dashes (-), dots (.), or lower-case alphanumerics in between, and +// follow domain name notation format +// (https://tools.ietf.org/html/rfc1035#section-2.3.1). +// The key prefix SHOULD include the plugin's host company name and/or +// the plugin name, to minimize the possibility of collisions with keys +// from other plugins. +// If a key prefix is specified, it MUST be identical across all +// topology keys returned by the SP (across all RPCs). +// Keys MUST be case-insensitive. Meaning the keys "Zone" and "zone" +// MUST not both exist. +// Each value (topological segment) MUST contain 1 or more strings. +// Each string MUST be 63 characters or less and begin and end with an +// alphanumeric character with '-', '_', '.', or alphanumerics in +// between. +message Topology { + map<string, string> segments = 1; +} +message DeleteVolumeRequest { + // The ID of the volume to be deprovisioned. + // This field is REQUIRED. + string volume_id = 1; + + // Secrets required by plugin to complete volume deletion request. + // This field is OPTIONAL. Refer to the `Secrets Requirements` + // section on how to use this field. + map<string, string> secrets = 2 [(csi_secret) = true]; +} + +message DeleteVolumeResponse { + // Intentionally empty. +} +message ControllerPublishVolumeRequest { + // The ID of the volume to be used on a node. + // This field is REQUIRED. + string volume_id = 1; + + // The ID of the node. This field is REQUIRED. The CO SHALL set this + // field to match the node ID returned by `NodeGetInfo`. + string node_id = 2; + + // Volume capability describing how the CO intends to use this volume. + // SP MUST ensure the CO can use the published volume as described. + // Otherwise SP MUST return the appropriate gRPC error code. + // This is a REQUIRED field.
+ VolumeCapability volume_capability = 3; + + // Indicates SP MUST publish the volume in readonly mode. + // CO MUST set this field to false if SP does not have the + // PUBLISH_READONLY controller capability. + // This is a REQUIRED field. + bool readonly = 4; + + // Secrets required by plugin to complete controller publish volume + // request. This field is OPTIONAL. Refer to the + // `Secrets Requirements` section on how to use this field. + map secrets = 5 [(csi_secret) = true]; + + // Volume context as returned by SP in + // CreateVolumeResponse.Volume.volume_context. + // This field is OPTIONAL and MUST match the volume_context of the + // volume identified by `volume_id`. + map volume_context = 6; +} + +message ControllerPublishVolumeResponse { + // Opaque static publish properties of the volume. SP MAY use this + // field to ensure subsequent `NodeStageVolume` or `NodePublishVolume` + // calls calls have contextual information. + // The contents of this field SHALL be opaque to a CO. + // The contents of this field SHALL NOT be mutable. + // The contents of this field SHALL be safe for the CO to cache. + // The contents of this field SHOULD NOT contain sensitive + // information. + // The contents of this field SHOULD NOT be used for uniquely + // identifying a volume. The `volume_id` alone SHOULD be sufficient to + // identify the volume. + // This field is OPTIONAL and when present MUST be passed to + // subsequent `NodeStageVolume` or `NodePublishVolume` calls + map publish_context = 1; +} +message ControllerUnpublishVolumeRequest { + // The ID of the volume. This field is REQUIRED. + string volume_id = 1; + + // The ID of the node. This field is OPTIONAL. The CO SHOULD set this + // field to match the node ID returned by `NodeGetInfo` or leave it + // unset. If the value is set, the SP MUST unpublish the volume from + // the specified node. If the value is unset, the SP MUST unpublish + // the volume from all nodes it is published to. 
+ string node_id = 2; + + // Secrets required by plugin to complete controller unpublish volume + // request. This SHOULD be the same secrets passed to the + // ControllerPublishVolume call for the specified volume. + // This field is OPTIONAL. Refer to the `Secrets Requirements` + // section on how to use this field. + map secrets = 3 [(csi_secret) = true]; +} + +message ControllerUnpublishVolumeResponse { + // Intentionally empty. +} +message ValidateVolumeCapabilitiesRequest { + // The ID of the volume to check. This field is REQUIRED. + string volume_id = 1; + + // Volume context as returned by SP in + // CreateVolumeResponse.Volume.volume_context. + // This field is OPTIONAL and MUST match the volume_context of the + // volume identified by `volume_id`. + map volume_context = 2; + + // The capabilities that the CO wants to check for the volume. This + // call SHALL return "confirmed" only if all the volume capabilities + // specified below are supported. This field is REQUIRED. + repeated VolumeCapability volume_capabilities = 3; + + // See CreateVolumeRequest.parameters. + // This field is OPTIONAL. + map parameters = 4; + + // Secrets required by plugin to complete volume validation request. + // This field is OPTIONAL. Refer to the `Secrets Requirements` + // section on how to use this field. + map secrets = 5 [(csi_secret) = true]; +} + +message ValidateVolumeCapabilitiesResponse { + message Confirmed { + // Volume context validated by the plugin. + // This field is OPTIONAL. + map volume_context = 1; + + // Volume capabilities supported by the plugin. + // This field is REQUIRED. + repeated VolumeCapability volume_capabilities = 2; + + // The volume creation parameters validated by the plugin. + // This field is OPTIONAL. + map parameters = 3; + } + + // Confirmed indicates to the CO the set of capabilities that the + // plugin has validated. This field SHALL only be set to a non-empty + // value for successful validation responses. 
+ // For successful validation responses, the CO SHALL compare the + // fields of this message to the originally requested capabilities in + // order to guard against an older plugin reporting "valid" for newer + // capability fields that it does not yet understand. + // This field is OPTIONAL. + Confirmed confirmed = 1; + + // Message to the CO if `confirmed` above is empty. This field is + // OPTIONAL. + // An empty string is equal to an unspecified field value. + string message = 2; +} +message ListVolumesRequest { + // If specified (non-zero value), the Plugin MUST NOT return more + // entries than this number in the response. If the actual number of + // entries is more than this number, the Plugin MUST set `next_token` + // in the response which can be used to get the next page of entries + // in the subsequent `ListVolumes` call. This field is OPTIONAL. If + // not specified (zero value), it means there is no restriction on the + // number of entries that can be returned. + // The value of this field MUST NOT be negative. + int32 max_entries = 1; + + // A token to specify where to start paginating. Set this field to + // `next_token` returned by a previous `ListVolumes` call to get the + // next page of entries. This field is OPTIONAL. + // An empty string is equal to an unspecified field value. + string starting_token = 2; +} + +message ListVolumesResponse { + message VolumeStatus{ + // A list of all `node_id` of nodes that the volume in this entry + // is controller published on. + // This field is OPTIONAL. If it is not specified and the SP has + // the LIST_VOLUMES_PUBLISHED_NODES controller capability, the CO + // MAY assume the volume is not controller published to any nodes. + // If the field is not specified and the SP does not have the + // LIST_VOLUMES_PUBLISHED_NODES controller capability, the CO MUST + // not interpret this field. + // published_node_ids MAY include nodes not published to or + // reported by the SP. 
The CO MUST be resilient to that. + repeated string published_node_ids = 1; + + // Information about the current condition of the volume. + // This field is OPTIONAL. + // This field MUST be specified if the + // VOLUME_CONDITION controller capability is supported. + VolumeCondition volume_condition = 2 [(alpha_field) = true]; + } + + message Entry { + // This field is REQUIRED + Volume volume = 1; + + // This field is OPTIONAL. This field MUST be specified if the + // LIST_VOLUMES_PUBLISHED_NODES controller capability is + // supported. + VolumeStatus status = 2; + } + + repeated Entry entries = 1; + + // This token allows you to get the next page of entries for + // `ListVolumes` request. If the number of entries is larger than + // `max_entries`, use the `next_token` as a value for the + // `starting_token` field in the next `ListVolumes` request. This + // field is OPTIONAL. + // An empty string is equal to an unspecified field value. + string next_token = 2; +} +message ControllerGetVolumeRequest { + option (alpha_message) = true; + + // The ID of the volume to fetch current volume information for. + // This field is REQUIRED. + string volume_id = 1; +} + +message ControllerGetVolumeResponse { + option (alpha_message) = true; + + message VolumeStatus{ + // A list of all the `node_id` of nodes that this volume is + // controller published on. + // This field is OPTIONAL. + // This field MUST be specified if the PUBLISH_UNPUBLISH_VOLUME + // controller capability is supported. + // published_node_ids MAY include nodes not published to or + // reported by the SP. The CO MUST be resilient to that. + repeated string published_node_ids = 1; + + // Information about the current condition of the volume. + // This field is OPTIONAL. + // This field MUST be specified if the + // VOLUME_CONDITION controller capability is supported. + VolumeCondition volume_condition = 2; + } + + // This field is REQUIRED + Volume volume = 1; + + // This field is REQUIRED. 
+ VolumeStatus status = 2; +} +message GetCapacityRequest { + // If specified, the Plugin SHALL report the capacity of the storage + // that can be used to provision volumes that satisfy ALL of the + // specified `volume_capabilities`. These are the same + // `volume_capabilities` the CO will use in `CreateVolumeRequest`. + // This field is OPTIONAL. + repeated VolumeCapability volume_capabilities = 1; + + // If specified, the Plugin SHALL report the capacity of the storage + // that can be used to provision volumes with the given Plugin + // specific `parameters`. These are the same `parameters` the CO will + // use in `CreateVolumeRequest`. This field is OPTIONAL. + map parameters = 2; + + // If specified, the Plugin SHALL report the capacity of the storage + // that can be used to provision volumes that in the specified + // `accessible_topology`. This is the same as the + // `accessible_topology` the CO returns in a `CreateVolumeResponse`. + // This field is OPTIONAL. This field SHALL NOT be set unless the + // plugin advertises the VOLUME_ACCESSIBILITY_CONSTRAINTS capability. + Topology accessible_topology = 3; +} + +message GetCapacityResponse { + // The available capacity, in bytes, of the storage that can be used + // to provision volumes. If `volume_capabilities` or `parameters` is + // specified in the request, the Plugin SHALL take those into + // consideration when calculating the available capacity of the + // storage. This field is REQUIRED. + // The value of this field MUST NOT be negative. + int64 available_capacity = 1; + + // The largest size that may be used in a + // CreateVolumeRequest.capacity_range.required_bytes field + // to create a volume with the same parameters as those in + // GetCapacityRequest. + // + // If `volume_capabilities` or `parameters` is + // specified in the request, the Plugin SHALL take those into + // consideration when calculating the minimum volume size of the + // storage. + // + // This field is OPTIONAL. 
MUST NOT be negative. + // The Plugin SHOULD provide a value for this field if it has + // a maximum size for individual volumes and leave it unset + // otherwise. COs MAY use it to make decision about + // where to create volumes. + google.protobuf.Int64Value maximum_volume_size = 2 + [(alpha_field) = true]; + + // The smallest size that may be used in a + // CreateVolumeRequest.capacity_range.limit_bytes field + // to create a volume with the same parameters as those in + // GetCapacityRequest. + // + // If `volume_capabilities` or `parameters` is + // specified in the request, the Plugin SHALL take those into + // consideration when calculating the maximum volume size of the + // storage. + // + // This field is OPTIONAL. MUST NOT be negative. + // The Plugin SHOULD provide a value for this field if it has + // a minimum size for individual volumes and leave it unset + // otherwise. COs MAY use it to make decision about + // where to create volumes. + google.protobuf.Int64Value minimum_volume_size = 3 + [(alpha_field) = true]; +} +message ControllerGetCapabilitiesRequest { + // Intentionally empty. +} + +message ControllerGetCapabilitiesResponse { + // All the capabilities that the controller service supports. This + // field is OPTIONAL. + repeated ControllerServiceCapability capabilities = 1; +} + +// Specifies a capability of the controller service. +message ControllerServiceCapability { + message RPC { + enum Type { + UNKNOWN = 0; + CREATE_DELETE_VOLUME = 1; + PUBLISH_UNPUBLISH_VOLUME = 2; + LIST_VOLUMES = 3; + GET_CAPACITY = 4; + // Currently the only way to consume a snapshot is to create + // a volume from it. Therefore plugins supporting + // CREATE_DELETE_SNAPSHOT MUST support creating volume from + // snapshot. + CREATE_DELETE_SNAPSHOT = 5; + LIST_SNAPSHOTS = 6; + + // Plugins supporting volume cloning at the storage level MAY + // report this capability. The source volume MUST be managed by + // the same plugin. 
Not all volume sources and parameters + // combinations MAY work. + CLONE_VOLUME = 7; + + // Indicates the SP supports ControllerPublishVolume.readonly + // field. + PUBLISH_READONLY = 8; + + // See VolumeExpansion for details. + EXPAND_VOLUME = 9; + + // Indicates the SP supports the + // ListVolumesResponse.entry.published_nodes field + LIST_VOLUMES_PUBLISHED_NODES = 10; + + // Indicates that the Controller service can report volume + // conditions. + // An SP MAY implement `VolumeCondition` in only the Controller + // Plugin, only the Node Plugin, or both. + // If `VolumeCondition` is implemented in both the Controller and + // Node Plugins, it SHALL report from different perspectives. + // If for some reason Controller and Node Plugins report + // misaligned volume conditions, CO SHALL assume the worst case + // is the truth. + // Note that, for alpha, `VolumeCondition` is intended be + // informative for humans only, not for automation. + VOLUME_CONDITION = 11 [(alpha_enum_value) = true]; + + // Indicates the SP supports the ControllerGetVolume RPC. + // This enables COs to, for example, fetch per volume + // condition after a volume is provisioned. + GET_VOLUME = 12 [(alpha_enum_value) = true]; + + // Indicates the SP supports the SINGLE_NODE_SINGLE_WRITER and/or + // SINGLE_NODE_MULTI_WRITER access modes. + // These access modes are intended to replace the + // SINGLE_NODE_WRITER access mode to clarify the number of writers + // for a volume on a single node. Plugins MUST accept and allow + // use of the SINGLE_NODE_WRITER access mode when either + // SINGLE_NODE_SINGLE_WRITER and/or SINGLE_NODE_MULTI_WRITER are + // supported, in order to permit older COs to continue working. + SINGLE_NODE_MULTI_WRITER = 13 [(alpha_enum_value) = true]; + } + + Type type = 1; + } + + oneof type { + // RPC that the controller supports. + RPC rpc = 1; + } +} +message CreateSnapshotRequest { + // The ID of the source volume to be snapshotted. + // This field is REQUIRED. 
+ string source_volume_id = 1; + + // The suggested name for the snapshot. This field is REQUIRED for + // idempotency. + // Any Unicode string that conforms to the length limit is allowed + // except those containing the following banned characters: + // U+0000-U+0008, U+000B, U+000C, U+000E-U+001F, U+007F-U+009F. + // (These are control characters other than commonly used whitespace.) + string name = 2; + + // Secrets required by plugin to complete snapshot creation request. + // This field is OPTIONAL. Refer to the `Secrets Requirements` + // section on how to use this field. + map secrets = 3 [(csi_secret) = true]; + + // Plugin specific parameters passed in as opaque key-value pairs. + // This field is OPTIONAL. The Plugin is responsible for parsing and + // validating these parameters. COs will treat these as opaque. + // Use cases for opaque parameters: + // - Specify a policy to automatically clean up the snapshot. + // - Specify an expiration date for the snapshot. + // - Specify whether the snapshot is readonly or read/write. + // - Specify if the snapshot should be replicated to some place. + // - Specify primary or secondary for replication systems that + // support snapshotting only on primary. + map parameters = 4; +} + +message CreateSnapshotResponse { + // Contains all attributes of the newly created snapshot that are + // relevant to the CO along with information required by the Plugin + // to uniquely identify the snapshot. This field is REQUIRED. + Snapshot snapshot = 1; +} + +// Information about a specific snapshot. +message Snapshot { + // This is the complete size of the snapshot in bytes. The purpose of + // this field is to give CO guidance on how much space is needed to + // create a volume from this snapshot. The size of the volume MUST NOT + // be less than the size of the source snapshot. This field is + // OPTIONAL. If this field is not set, it indicates that this size is + // unknown. 
The value of this field MUST NOT be negative and a size of + // zero means it is unspecified. + int64 size_bytes = 1; + + // The identifier for this snapshot, generated by the plugin. + // This field is REQUIRED. + // This field MUST contain enough information to uniquely identify + // this specific snapshot vs all other snapshots supported by this + // plugin. + // This field SHALL be used by the CO in subsequent calls to refer to + // this snapshot. + // The SP is NOT responsible for global uniqueness of snapshot_id + // across multiple SPs. + string snapshot_id = 2; + + // Identity information for the source volume. Note that creating a + // snapshot from a snapshot is not supported here so the source has to + // be a volume. This field is REQUIRED. + string source_volume_id = 3; + + // Timestamp when the point-in-time snapshot is taken on the storage + // system. This field is REQUIRED. + .google.protobuf.Timestamp creation_time = 4; + + // Indicates if a snapshot is ready to use as a + // `volume_content_source` in a `CreateVolumeRequest`. The default + // value is false. This field is REQUIRED. + bool ready_to_use = 5; +} +message DeleteSnapshotRequest { + // The ID of the snapshot to be deleted. + // This field is REQUIRED. + string snapshot_id = 1; + + // Secrets required by plugin to complete snapshot deletion request. + // This field is OPTIONAL. Refer to the `Secrets Requirements` + // section on how to use this field. + map secrets = 2 [(csi_secret) = true]; +} + +message DeleteSnapshotResponse {} +// List all snapshots on the storage system regardless of how they were +// created. +message ListSnapshotsRequest { + // If specified (non-zero value), the Plugin MUST NOT return more + // entries than this number in the response. If the actual number of + // entries is more than this number, the Plugin MUST set `next_token` + // in the response which can be used to get the next page of entries + // in the subsequent `ListSnapshots` call. 
This field is OPTIONAL. If + // not specified (zero value), it means there is no restriction on the + // number of entries that can be returned. + // The value of this field MUST NOT be negative. + int32 max_entries = 1; + + // A token to specify where to start paginating. Set this field to + // `next_token` returned by a previous `ListSnapshots` call to get the + // next page of entries. This field is OPTIONAL. + // An empty string is equal to an unspecified field value. + string starting_token = 2; + + // Identity information for the source volume. This field is OPTIONAL. + // It can be used to list snapshots by volume. + string source_volume_id = 3; + + // Identity information for a specific snapshot. This field is + // OPTIONAL. It can be used to list only a specific snapshot. + // ListSnapshots will return with current snapshot information + // and will not block if the snapshot is being processed after + // it is cut. + string snapshot_id = 4; + + // Secrets required by plugin to complete ListSnapshot request. + // This field is OPTIONAL. Refer to the `Secrets Requirements` + // section on how to use this field. + map secrets = 5 [(csi_secret) = true]; +} + +message ListSnapshotsResponse { + message Entry { + Snapshot snapshot = 1; + } + + repeated Entry entries = 1; + + // This token allows you to get the next page of entries for + // `ListSnapshots` request. If the number of entries is larger than + // `max_entries`, use the `next_token` as a value for the + // `starting_token` field in the next `ListSnapshots` request. This + // field is OPTIONAL. + // An empty string is equal to an unspecified field value. + string next_token = 2; +} +message ControllerExpandVolumeRequest { + // The ID of the volume to expand. This field is REQUIRED. + string volume_id = 1; + + // This allows CO to specify the capacity requirements of the volume + // after expansion. This field is REQUIRED. 
+ CapacityRange capacity_range = 2; + + // Secrets required by the plugin for expanding the volume. + // This field is OPTIONAL. + map secrets = 3 [(csi_secret) = true]; + + // Volume capability describing how the CO intends to use this volume. + // This allows SP to determine if volume is being used as a block + // device or mounted file system. For example - if volume is + // being used as a block device - the SP MAY set + // node_expansion_required to false in ControllerExpandVolumeResponse + // to skip invocation of NodeExpandVolume on the node by the CO. + // This is an OPTIONAL field. + VolumeCapability volume_capability = 4; +} + +message ControllerExpandVolumeResponse { + // Capacity of volume after expansion. This field is REQUIRED. + int64 capacity_bytes = 1; + + // Whether node expansion is required for the volume. When true + // the CO MUST make NodeExpandVolume RPC call on the node. This field + // is REQUIRED. + bool node_expansion_required = 2; +} +message NodeStageVolumeRequest { + // The ID of the volume to publish. This field is REQUIRED. + string volume_id = 1; + + // The CO SHALL set this field to the value returned by + // `ControllerPublishVolume` if the corresponding Controller Plugin + // has `PUBLISH_UNPUBLISH_VOLUME` controller capability, and SHALL be + // left unset if the corresponding Controller Plugin does not have + // this capability. This is an OPTIONAL field. + map publish_context = 2; + + // The path to which the volume MAY be staged. It MUST be an + // absolute path in the root filesystem of the process serving this + // request, and MUST be a directory. The CO SHALL ensure that there + // is only one `staging_target_path` per volume. The CO SHALL ensure + // that the path is directory and that the process serving the + // request has `read` and `write` permission to that directory. The + // CO SHALL be responsible for creating the directory if it does not + // exist. + // This is a REQUIRED field. 
+ // This field overrides the general CSI size limit. + // SP SHOULD support the maximum path length allowed by the operating + // system/filesystem, but, at a minimum, SP MUST accept a max path + // length of at least 128 bytes. + string staging_target_path = 3; + + // Volume capability describing how the CO intends to use this volume. + // SP MUST ensure the CO can use the staged volume as described. + // Otherwise SP MUST return the appropriate gRPC error code. + // This is a REQUIRED field. + VolumeCapability volume_capability = 4; + + // Secrets required by plugin to complete node stage volume request. + // This field is OPTIONAL. Refer to the `Secrets Requirements` + // section on how to use this field. + map secrets = 5 [(csi_secret) = true]; + + // Volume context as returned by SP in + // CreateVolumeResponse.Volume.volume_context. + // This field is OPTIONAL and MUST match the volume_context of the + // volume identified by `volume_id`. + map volume_context = 6; +} + +message NodeStageVolumeResponse { + // Intentionally empty. +} +message NodeUnstageVolumeRequest { + // The ID of the volume. This field is REQUIRED. + string volume_id = 1; + + // The path at which the volume was staged. It MUST be an absolute + // path in the root filesystem of the process serving this request. + // This is a REQUIRED field. + // This field overrides the general CSI size limit. + // SP SHOULD support the maximum path length allowed by the operating + // system/filesystem, but, at a minimum, SP MUST accept a max path + // length of at least 128 bytes. + string staging_target_path = 2; +} + +message NodeUnstageVolumeResponse { + // Intentionally empty. +} +message NodePublishVolumeRequest { + // The ID of the volume to publish. This field is REQUIRED. 
+ string volume_id = 1; + + // The CO SHALL set this field to the value returned by + // `ControllerPublishVolume` if the corresponding Controller Plugin + // has `PUBLISH_UNPUBLISH_VOLUME` controller capability, and SHALL be + // left unset if the corresponding Controller Plugin does not have + // this capability. This is an OPTIONAL field. + map publish_context = 2; + + // The path to which the volume was staged by `NodeStageVolume`. + // It MUST be an absolute path in the root filesystem of the process + // serving this request. + // It MUST be set if the Node Plugin implements the + // `STAGE_UNSTAGE_VOLUME` node capability. + // This is an OPTIONAL field. + // This field overrides the general CSI size limit. + // SP SHOULD support the maximum path length allowed by the operating + // system/filesystem, but, at a minimum, SP MUST accept a max path + // length of at least 128 bytes. + string staging_target_path = 3; + + // The path to which the volume will be published. It MUST be an + // absolute path in the root filesystem of the process serving this + // request. The CO SHALL ensure uniqueness of target_path per volume. + // The CO SHALL ensure that the parent directory of this path exists + // and that the process serving the request has `read` and `write` + // permissions to that parent directory. + // For volumes with an access type of block, the SP SHALL place the + // block device at target_path. + // For volumes with an access type of mount, the SP SHALL place the + // mounted directory at target_path. + // Creation of target_path is the responsibility of the SP. + // This is a REQUIRED field. + // This field overrides the general CSI size limit. + // SP SHOULD support the maximum path length allowed by the operating + // system/filesystem, but, at a minimum, SP MUST accept a max path + // length of at least 128 bytes. + string target_path = 4; + + // Volume capability describing how the CO intends to use this volume. 
+ // SP MUST ensure the CO can use the published volume as described. + // Otherwise SP MUST return the appropriate gRPC error code. + // This is a REQUIRED field. + VolumeCapability volume_capability = 5; + + // Indicates SP MUST publish the volume in readonly mode. + // This field is REQUIRED. + bool readonly = 6; + + // Secrets required by plugin to complete node publish volume request. + // This field is OPTIONAL. Refer to the `Secrets Requirements` + // section on how to use this field. + map secrets = 7 [(csi_secret) = true]; + + // Volume context as returned by SP in + // CreateVolumeResponse.Volume.volume_context. + // This field is OPTIONAL and MUST match the volume_context of the + // volume identified by `volume_id`. + map volume_context = 8; +} + +message NodePublishVolumeResponse { + // Intentionally empty. +} +message NodeUnpublishVolumeRequest { + // The ID of the volume. This field is REQUIRED. + string volume_id = 1; + + // The path at which the volume was published. It MUST be an absolute + // path in the root filesystem of the process serving this request. + // The SP MUST delete the file or directory it created at this path. + // This is a REQUIRED field. + // This field overrides the general CSI size limit. + // SP SHOULD support the maximum path length allowed by the operating + // system/filesystem, but, at a minimum, SP MUST accept a max path + // length of at least 128 bytes. + string target_path = 2; +} + +message NodeUnpublishVolumeResponse { + // Intentionally empty. +} +message NodeGetVolumeStatsRequest { + // The ID of the volume. This field is REQUIRED. + string volume_id = 1; + + // It can be any valid path where volume was previously + // staged or published. + // It MUST be an absolute path in the root filesystem of + // the process serving this request. + // This is a REQUIRED field. + // This field overrides the general CSI size limit. 
+ // SP SHOULD support the maximum path length allowed by the operating + // system/filesystem, but, at a minimum, SP MUST accept a max path + // length of at least 128 bytes. + string volume_path = 2; + + // The path where the volume is staged, if the plugin has the + // STAGE_UNSTAGE_VOLUME capability, otherwise empty. + // If not empty, it MUST be an absolute path in the root + // filesystem of the process serving this request. + // This field is OPTIONAL. + // This field overrides the general CSI size limit. + // SP SHOULD support the maximum path length allowed by the operating + // system/filesystem, but, at a minimum, SP MUST accept a max path + // length of at least 128 bytes. + string staging_target_path = 3; +} + +message NodeGetVolumeStatsResponse { + // This field is OPTIONAL. + repeated VolumeUsage usage = 1; + // Information about the current condition of the volume. + // This field is OPTIONAL. + // This field MUST be specified if the VOLUME_CONDITION node + // capability is supported. + VolumeCondition volume_condition = 2 [(alpha_field) = true]; +} + +message VolumeUsage { + enum Unit { + UNKNOWN = 0; + BYTES = 1; + INODES = 2; + } + // The available capacity in specified Unit. This field is OPTIONAL. + // The value of this field MUST NOT be negative. + int64 available = 1; + + // The total capacity in specified Unit. This field is REQUIRED. + // The value of this field MUST NOT be negative. + int64 total = 2; + + // The used capacity in specified Unit. This field is OPTIONAL. + // The value of this field MUST NOT be negative. + int64 used = 3; + + // Units by which values are measured. This field is REQUIRED. + Unit unit = 4; +} + +// VolumeCondition represents the current condition of a volume. +message VolumeCondition { + option (alpha_message) = true; + + // Normal volumes are available for use and operating optimally. + // An abnormal volume does not meet these criteria. + // This field is REQUIRED. 
+ bool abnormal = 1; + + // The message describing the condition of the volume. + // This field is REQUIRED. + string message = 2; +} +message NodeGetCapabilitiesRequest { + // Intentionally empty. +} + +message NodeGetCapabilitiesResponse { + // All the capabilities that the node service supports. This field + // is OPTIONAL. + repeated NodeServiceCapability capabilities = 1; +} + +// Specifies a capability of the node service. +message NodeServiceCapability { + message RPC { + enum Type { + UNKNOWN = 0; + STAGE_UNSTAGE_VOLUME = 1; + // If Plugin implements GET_VOLUME_STATS capability + // then it MUST implement NodeGetVolumeStats RPC + // call for fetching volume statistics. + GET_VOLUME_STATS = 2; + // See VolumeExpansion for details. + EXPAND_VOLUME = 3; + // Indicates that the Node service can report volume conditions. + // An SP MAY implement `VolumeCondition` in only the Node + // Plugin, only the Controller Plugin, or both. + // If `VolumeCondition` is implemented in both the Node and + // Controller Plugins, it SHALL report from different + // perspectives. + // If for some reason Node and Controller Plugins report + // misaligned volume conditions, CO SHALL assume the worst case + // is the truth. + // Note that, for alpha, `VolumeCondition` is intended to be + // informative for humans only, not for automation. + VOLUME_CONDITION = 4 [(alpha_enum_value) = true]; + + // Indicates the SP supports the SINGLE_NODE_SINGLE_WRITER and/or + // SINGLE_NODE_MULTI_WRITER access modes. + // These access modes are intended to replace the + // SINGLE_NODE_WRITER access mode to clarify the number of writers + // for a volume on a single node. Plugins MUST accept and allow + // use of the SINGLE_NODE_WRITER access mode (subject to the + // processing rules for NodePublishVolume), when either + // SINGLE_NODE_SINGLE_WRITER and/or SINGLE_NODE_MULTI_WRITER are + // supported, in order to permit older COs to continue working. 
+ SINGLE_NODE_MULTI_WRITER = 5 [(alpha_enum_value) = true]; + + // Indicates that Node service supports mounting volumes + // with provided volume group identifier during node stage + // or node publish RPC calls. + VOLUME_MOUNT_GROUP = 6 [(alpha_enum_value) = true]; + } + + Type type = 1; + } + + oneof type { + // RPC that the controller supports. + RPC rpc = 1; + } +} +message NodeGetInfoRequest { +} + +message NodeGetInfoResponse { + // The identifier of the node as understood by the SP. + // This field is REQUIRED. + // This field MUST contain enough information to uniquely identify + // this specific node vs all other nodes supported by this plugin. + // This field SHALL be used by the CO in subsequent calls, including + // `ControllerPublishVolume`, to refer to this node. + // The SP is NOT responsible for global uniqueness of node_id across + // multiple SPs. + // This field overrides the general CSI size limit. + // The size of this field SHALL NOT exceed 256 bytes. The general + // CSI size limit, 128 byte, is RECOMMENDED for best backwards + // compatibility. + string node_id = 1; + + // Maximum number of volumes that controller can publish to the node. + // If value is not set or zero CO SHALL decide how many volumes of + // this type can be published by the controller to the node. The + // plugin MUST NOT set negative values here. + // This field is OPTIONAL. + int64 max_volumes_per_node = 2; + + // Specifies where (regions, zones, racks, etc.) the node is + // accessible from. + // A plugin that returns this field MUST also set the + // VOLUME_ACCESSIBILITY_CONSTRAINTS plugin capability. + // COs MAY use this information along with the topology information + // returned in CreateVolumeResponse to ensure that a given volume is + // accessible from a given node when scheduling workloads. + // This field is OPTIONAL. 
If it is not specified, the CO MAY assume + // the node is not subject to any topological constraint, and MAY + // schedule workloads that reference any volume V, such that there are + // no topological constraints declared for V. + // + // Example 1: + // accessible_topology = + // {"region": "R1", "zone": "Z2"} + // Indicates the node exists within the "region" "R1" and the "zone" + // "Z2". + Topology accessible_topology = 3; +} +message NodeExpandVolumeRequest { + // The ID of the volume. This field is REQUIRED. + string volume_id = 1; + + // The path on which volume is available. This field is REQUIRED. + // This field overrides the general CSI size limit. + // SP SHOULD support the maximum path length allowed by the operating + // system/filesystem, but, at a minimum, SP MUST accept a max path + // length of at least 128 bytes. + string volume_path = 2; + + // This allows CO to specify the capacity requirements of the volume + // after expansion. If capacity_range is omitted then a plugin MAY + // inspect the file system of the volume to determine the maximum + // capacity to which the volume can be expanded. In such cases a + // plugin MAY expand the volume to its maximum capacity. + // This field is OPTIONAL. + CapacityRange capacity_range = 3; + + // The path where the volume is staged, if the plugin has the + // STAGE_UNSTAGE_VOLUME capability, otherwise empty. + // If not empty, it MUST be an absolute path in the root + // filesystem of the process serving this request. + // This field is OPTIONAL. + // This field overrides the general CSI size limit. + // SP SHOULD support the maximum path length allowed by the operating + // system/filesystem, but, at a minimum, SP MUST accept a max path + // length of at least 128 bytes. + string staging_target_path = 4; + + // Volume capability describing how the CO intends to use this volume. + // This allows SP to determine if volume is being used as a block + // device or mounted file system. 
For example - if volume is being + // used as a block device the SP MAY choose to skip expanding the + // filesystem in NodeExpandVolume implementation but still perform + // rest of the housekeeping needed for expanding the volume. If + // volume_capability is omitted the SP MAY determine + // access_type from given volume_path for the volume and perform + // node expansion. This is an OPTIONAL field. + VolumeCapability volume_capability = 5; + + // Secrets required by plugin to complete node expand volume request. + // This field is OPTIONAL. Refer to the `Secrets Requirements` + // section on how to use this field. + map secrets = 6 + [(csi_secret) = true, (alpha_field) = true]; +} + +message NodeExpandVolumeResponse { + // The capacity of the volume in bytes. This field is OPTIONAL. + int64 capacity_bytes = 1; +} diff --git a/package-lock.json b/package-lock.json index 22ca674..ef2600e 100644 --- a/package-lock.json +++ b/package-lock.json @@ -8,6 +8,7 @@ "version": "1.2.0", "license": "MIT", "dependencies": { + "@grpc/grpc-js": "^1.3.6", "@grpc/proto-loader": "^0.6.0", "async-mutex": "^0.3.1", "bunyan": "^1.8.15", @@ -183,6 +184,17 @@ "js-yaml": "bin/js-yaml.js" } }, + "node_modules/@grpc/grpc-js": { + "version": "1.3.6", + "resolved": "https://registry.npmjs.org/@grpc/grpc-js/-/grpc-js-1.3.6.tgz", + "integrity": "sha512-v7+LQFbqZKmd/Tvf5/j1Xlbq6jXL/4d+gUtm2TNX4QiEC3ELWADmGr2dGlUyLl6aKTuYfsN72vAsO5zmavYkEg==", + "dependencies": { + "@types/node": ">=12.12.47" + }, + "engines": { + "node": "^8.13.0 || >=10.10.0" + } + }, "node_modules/@grpc/proto-loader": { "version": "0.6.4", "resolved": "https://registry.npmjs.org/@grpc/proto-loader/-/proto-loader-0.6.4.tgz", @@ -442,7 +454,8 @@ "node_modules/balanced-match": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz", - "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==" + "integrity": 
"sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==", + "devOptional": true }, "node_modules/bcrypt-pbkdf": { "version": "1.0.2", @@ -456,6 +469,7 @@ "version": "1.1.11", "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", + "devOptional": true, "dependencies": { "balanced-match": "^1.0.0", "concat-map": "0.0.1" @@ -545,14 +559,6 @@ "wrap-ansi": "^7.0.0" } }, - "node_modules/code-point-at": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/code-point-at/-/code-point-at-1.1.0.tgz", - "integrity": "sha1-DQcLTQQ6W+ozovGkDi7bPZpMz3c=", - "engines": { - "node": ">=0.10.0" - } - }, "node_modules/color": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/color/-/color-3.0.0.tgz", @@ -639,7 +645,8 @@ "node_modules/concat-map": { "version": "0.0.1", "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz", - "integrity": "sha1-2Klr13/Wjfd5OnMDajug1UBdR3s=" + "integrity": "sha1-2Klr13/Wjfd5OnMDajug1UBdR3s=", + "devOptional": true }, "node_modules/core-util-is": { "version": "1.0.2", @@ -1119,7 +1126,8 @@ "node_modules/fs.realpath": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz", - "integrity": "sha1-FQStJSMVjKpA20onh8sBQRmU6k8=" + "integrity": "sha1-FQStJSMVjKpA20onh8sBQRmU6k8=", + "dev": true }, "node_modules/functional-red-black-tree": { "version": "1.0.1", @@ -1147,6 +1155,7 @@ "version": "7.1.7", "resolved": "https://registry.npmjs.org/glob/-/glob-7.1.7.tgz", "integrity": "sha512-OvD9ENzPLbegENnYP5UUfJIirTg4+XwMWGaQfQTY0JenxNvvIKP3U3/tAQSPIu/lHxXYSZmpXlUHeqAIdKzBLQ==", + "dev": true, "dependencies": { "fs.realpath": "^1.0.0", "inflight": "^1.0.4", @@ -1920,6 +1929,7 @@ "version": "1.0.6", "resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz", "integrity": 
"sha1-Sb1jMdfQLQwJvJEKEHW6gWW1bfk=", + "devOptional": true, "dependencies": { "once": "^1.3.0", "wrappy": "1" @@ -2174,6 +2184,7 @@ "version": "3.0.4", "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.0.4.tgz", "integrity": "sha512-yJHVQEhyqPLUTgt9B83PXu6W3rx4MvvHvSUvToogpwoGDOUQ+yDrR0HRot+yOCdCO7u4hX3pWft6kWBBcqh0UA==", + "devOptional": true, "dependencies": { "brace-expansion": "^1.1.7" }, @@ -2279,14 +2290,6 @@ "resolved": "https://registry.npmjs.org/neo-async/-/neo-async-2.6.2.tgz", "integrity": "sha512-Yd3UES5mWCSqR+qNT93S3UoYUkqAZ9lLg8a7g9rimsWmYGK8cVToA4/sF3RrshdyV3sAGMXVUmpMYOw+dLpOuw==" }, - "node_modules/number-is-nan": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/number-is-nan/-/number-is-nan-1.0.1.tgz", - "integrity": "sha1-CXtgK1NCKlIsGvuHkDGDNpQaAR0=", - "engines": { - "node": ">=0.10.0" - } - }, "node_modules/oauth-sign": { "version": "0.9.0", "resolved": "https://registry.npmjs.org/oauth-sign/-/oauth-sign-0.9.0.tgz", @@ -2299,6 +2302,7 @@ "version": "1.4.0", "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz", "integrity": "sha1-WDsap3WWHUsROsF9nFC6753Xa9E=", + "devOptional": true, "dependencies": { "wrappy": "1" } @@ -2360,6 +2364,7 @@ "version": "1.0.1", "resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz", "integrity": "sha1-F0uSaHNVNP+8es5r9TpanhtcX18=", + "devOptional": true, "engines": { "node": ">=0.10.0" } @@ -3043,7 +3048,8 @@ "node_modules/wrappy": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", - "integrity": "sha1-tSQ9jz7BqjXxNkYFvA0QNuMKtp8=" + "integrity": "sha1-tSQ9jz7BqjXxNkYFvA0QNuMKtp8=", + "devOptional": true }, "node_modules/y18n": { "version": "5.0.8", @@ -3217,6 +3223,14 @@ } } }, + "@grpc/grpc-js": { + "version": "1.3.6", + "resolved": "https://registry.npmjs.org/@grpc/grpc-js/-/grpc-js-1.3.6.tgz", + "integrity": 
"sha512-v7+LQFbqZKmd/Tvf5/j1Xlbq6jXL/4d+gUtm2TNX4QiEC3ELWADmGr2dGlUyLl6aKTuYfsN72vAsO5zmavYkEg==", + "requires": { + "@types/node": ">=12.12.47" + } + }, "@grpc/proto-loader": { "version": "0.6.4", "resolved": "https://registry.npmjs.org/@grpc/proto-loader/-/proto-loader-0.6.4.tgz", @@ -3433,7 +3447,8 @@ "balanced-match": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz", - "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==" + "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==", + "devOptional": true }, "bcrypt-pbkdf": { "version": "1.0.2", @@ -3447,6 +3462,7 @@ "version": "1.1.11", "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", + "devOptional": true, "requires": { "balanced-match": "^1.0.0", "concat-map": "0.0.1" @@ -3514,10 +3530,6 @@ "wrap-ansi": "^7.0.0" } }, - "code-point-at": { - "version": "https://registry.npmjs.org/code-point-at/-/code-point-at-1.1.0.tgz", - "integrity": "sha1-DQcLTQQ6W+ozovGkDi7bPZpMz3c=" - }, "color": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/color/-/color-3.0.0.tgz", @@ -3594,7 +3606,8 @@ "concat-map": { "version": "0.0.1", "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz", - "integrity": "sha1-2Klr13/Wjfd5OnMDajug1UBdR3s=" + "integrity": "sha1-2Klr13/Wjfd5OnMDajug1UBdR3s=", + "devOptional": true }, "core-util-is": { "version": "1.0.2", @@ -3968,7 +3981,8 @@ "fs.realpath": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz", - "integrity": "sha1-FQStJSMVjKpA20onh8sBQRmU6k8=" + "integrity": "sha1-FQStJSMVjKpA20onh8sBQRmU6k8=", + "dev": true }, "functional-red-black-tree": { "version": "1.0.1", @@ -3993,6 +4007,7 @@ "version": 
"7.1.7", "resolved": "https://registry.npmjs.org/glob/-/glob-7.1.7.tgz", "integrity": "sha512-OvD9ENzPLbegENnYP5UUfJIirTg4+XwMWGaQfQTY0JenxNvvIKP3U3/tAQSPIu/lHxXYSZmpXlUHeqAIdKzBLQ==", + "dev": true, "requires": { "fs.realpath": "^1.0.0", "inflight": "^1.0.4", @@ -4556,6 +4571,7 @@ "version": "1.0.6", "resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz", "integrity": "sha1-Sb1jMdfQLQwJvJEKEHW6gWW1bfk=", + "devOptional": true, "requires": { "once": "^1.3.0", "wrappy": "1" @@ -4771,6 +4787,7 @@ "version": "3.0.4", "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.0.4.tgz", "integrity": "sha512-yJHVQEhyqPLUTgt9B83PXu6W3rx4MvvHvSUvToogpwoGDOUQ+yDrR0HRot+yOCdCO7u4hX3pWft6kWBBcqh0UA==", + "devOptional": true, "requires": { "brace-expansion": "^1.1.7" } @@ -4857,10 +4874,6 @@ "resolved": "https://registry.npmjs.org/neo-async/-/neo-async-2.6.2.tgz", "integrity": "sha512-Yd3UES5mWCSqR+qNT93S3UoYUkqAZ9lLg8a7g9rimsWmYGK8cVToA4/sF3RrshdyV3sAGMXVUmpMYOw+dLpOuw==" }, - "number-is-nan": { - "version": "https://registry.npmjs.org/number-is-nan/-/number-is-nan-1.0.1.tgz", - "integrity": "sha1-CXtgK1NCKlIsGvuHkDGDNpQaAR0=" - }, "oauth-sign": { "version": "0.9.0", "resolved": "https://registry.npmjs.org/oauth-sign/-/oauth-sign-0.9.0.tgz", @@ -4870,6 +4883,7 @@ "version": "1.4.0", "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz", "integrity": "sha1-WDsap3WWHUsROsF9nFC6753Xa9E=", + "devOptional": true, "requires": { "wrappy": "1" } @@ -4921,7 +4935,8 @@ "path-is-absolute": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz", - "integrity": "sha1-F0uSaHNVNP+8es5r9TpanhtcX18=" + "integrity": "sha1-F0uSaHNVNP+8es5r9TpanhtcX18=", + "devOptional": true }, "path-key": { "version": "3.1.1", @@ -5428,7 +5443,8 @@ "wrappy": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", - "integrity": "sha1-tSQ9jz7BqjXxNkYFvA0QNuMKtp8=" + "integrity": 
"sha1-tSQ9jz7BqjXxNkYFvA0QNuMKtp8=", + "devOptional": true }, "y18n": { "version": "5.0.8", diff --git a/package.json b/package.json index 51ccbce..2b2398b 100644 --- a/package.json +++ b/package.json @@ -18,6 +18,7 @@ "url": "https://github.com/democratic-csi/democratic-csi.git" }, "dependencies": { + "@grpc/grpc-js": "^1.3.6", "@grpc/proto-loader": "^0.6.0", "async-mutex": "^0.3.1", "bunyan": "^1.8.15", diff --git a/src/driver/controller-client-common/index.js b/src/driver/controller-client-common/index.js index 9fe616b..18c30ee 100644 --- a/src/driver/controller-client-common/index.js +++ b/src/driver/controller-client-common/index.js @@ -1,6 +1,7 @@ const { CsiBaseDriver } = require("../index"); const { GrpcError, grpc } = require("../../utils/grpc"); const cp = require("child_process"); +const semver = require("semver"); /** * Crude nfs-client driver which simply creates directories to be mounted @@ -59,6 +60,20 @@ class ControllerClientCommonDriver extends CsiBaseDriver { //"PUBLISH_READONLY", //"EXPAND_VOLUME", ]; + + if (semver.satisfies(this.ctx.csiVersion, ">=1.3.0")) { + options.service.controller.capabilities.rpc + .push + //"VOLUME_CONDITION", + //"GET_VOLUME" + (); + } + + if (semver.satisfies(this.ctx.csiVersion, ">=1.5.0")) { + options.service.controller.capabilities.rpc.push( + "SINGLE_NODE_MULTI_WRITER" + ); + } } if (!("rpc" in options.service.node.capabilities)) { @@ -70,6 +85,18 @@ class ControllerClientCommonDriver extends CsiBaseDriver { "GET_VOLUME_STATS", //"EXPAND_VOLUME" ]; + + if (semver.satisfies(this.ctx.csiVersion, ">=1.3.0")) { + //options.service.node.capabilities.rpc.push("VOLUME_CONDITION"); + } + + if (semver.satisfies(this.ctx.csiVersion, ">=1.5.0")) { + options.service.node.capabilities.rpc.push("SINGLE_NODE_MULTI_WRITER"); + /** + * This is for volumes that support a mount time gid such as smb or fat + */ + //options.service.node.capabilities.rpc.push("VOLUME_MOUNT_GROUP"); + } } } @@ -98,6 +125,8 @@ class 
ControllerClientCommonDriver extends CsiBaseDriver { ![ "UNKNOWN", "SINGLE_NODE_WRITER", + "SINGLE_NODE_SINGLE_WRITER", // added in v1.5.0 + "SINGLE_NODE_MULTI_WRITER", // added in v1.5.0 "SINGLE_NODE_READER_ONLY", "MULTI_NODE_READER_ONLY", "MULTI_NODE_SINGLE_WRITER", diff --git a/src/driver/controller-synology/http/index.js b/src/driver/controller-synology/http/index.js index bdba28f..0e7ea50 100644 --- a/src/driver/controller-synology/http/index.js +++ b/src/driver/controller-synology/http/index.js @@ -89,7 +89,13 @@ class SynologyHttpClient { switch (method) { case "GET": - options.qs = data; + let qsData = JSON.parse(JSON.stringify(data)); + for (let p in qsData) { + if (Array.isArray(qsData[p])) { + qsData[p] = JSON.stringify(qsData[p]); + } + } + options.qs = qsData; break; default: if (invoke_options.use_form_encoded) { diff --git a/src/driver/controller-synology/index.js b/src/driver/controller-synology/index.js index c377f6b..f3c51d5 100644 --- a/src/driver/controller-synology/index.js +++ b/src/driver/controller-synology/index.js @@ -1,6 +1,7 @@ const { CsiBaseDriver } = require("../index"); const { GrpcError, grpc } = require("../../utils/grpc"); const SynologyHttpClient = require("./http").SynologyHttpClient; +const semver = require("semver"); const sleep = require("../../utils/general").sleep; /** @@ -63,6 +64,20 @@ class ControllerSynologyDriver extends CsiBaseDriver { //"PUBLISH_READONLY", "EXPAND_VOLUME", ]; + + if (semver.satisfies(this.ctx.csiVersion, ">=1.3.0")) { + options.service.controller.capabilities.rpc + .push + //"VOLUME_CONDITION", + //"GET_VOLUME" (would need to properly handle volume_content_source) + (); + } + + if (semver.satisfies(this.ctx.csiVersion, ">=1.5.0")) { + options.service.controller.capabilities.rpc.push( + "SINGLE_NODE_MULTI_WRITER" + ); + } } if (!("rpc" in options.service.node.capabilities)) { @@ -78,6 +93,18 @@ class ControllerSynologyDriver extends CsiBaseDriver { if (driverResourceType == "volume") { 
options.service.node.capabilities.rpc.push("EXPAND_VOLUME"); } + + if (semver.satisfies(this.ctx.csiVersion, ">=1.3.0")) { + //options.service.node.capabilities.rpc.push("VOLUME_CONDITION"); + } + + if (semver.satisfies(this.ctx.csiVersion, ">=1.5.0")) { + options.service.node.capabilities.rpc.push("SINGLE_NODE_MULTI_WRITER"); + /** + * This is for volumes that support a mount time gid such as smb or fat + */ + //options.service.node.capabilities.rpc.push("VOLUME_MOUNT_GROUP"); + } } } @@ -152,6 +179,8 @@ class ControllerSynologyDriver extends CsiBaseDriver { ![ "UNKNOWN", "SINGLE_NODE_WRITER", + "SINGLE_NODE_SINGLE_WRITER", // added in v1.5.0 + "SINGLE_NODE_MULTI_WRITER", // added in v1.5.0 "SINGLE_NODE_READER_ONLY", "MULTI_NODE_READER_ONLY", "MULTI_NODE_SINGLE_WRITER", @@ -180,6 +209,8 @@ class ControllerSynologyDriver extends CsiBaseDriver { ![ "UNKNOWN", "SINGLE_NODE_WRITER", + "SINGLE_NODE_SINGLE_WRITER", // added in v1.5.0 + "SINGLE_NODE_MULTI_WRITER", // added in v1.5.0 "SINGLE_NODE_READER_ONLY", "MULTI_NODE_READER_ONLY", "MULTI_NODE_SINGLE_WRITER", diff --git a/src/driver/controller-zfs-ssh/index.js b/src/driver/controller-zfs-ssh/index.js index 51c5752..92b8516 100644 --- a/src/driver/controller-zfs-ssh/index.js +++ b/src/driver/controller-zfs-ssh/index.js @@ -90,9 +90,20 @@ class ControllerZfsSshBaseDriver extends CsiBaseDriver { "CLONE_VOLUME", //"PUBLISH_READONLY", "EXPAND_VOLUME", - //"VOLUME_CONDITION", // added in v1.3.0 - //"GET_VOLUME", // added in v1.3.0 ]; + + if (semver.satisfies(this.ctx.csiVersion, ">=1.3.0")) { + options.service.controller.capabilities.rpc.push( + //"VOLUME_CONDITION", + "GET_VOLUME" + ); + } + + if (semver.satisfies(this.ctx.csiVersion, ">=1.5.0")) { + options.service.controller.capabilities.rpc.push( + "SINGLE_NODE_MULTI_WRITER" + ); + } } if (!("rpc" in options.service.node.capabilities)) { @@ -118,6 +129,18 @@ class ControllerZfsSshBaseDriver extends CsiBaseDriver { ]; break; } + + if 
(semver.satisfies(this.ctx.csiVersion, ">=1.3.0")) { + //options.service.node.capabilities.rpc.push("VOLUME_CONDITION"); + } + + if (semver.satisfies(this.ctx.csiVersion, ">=1.5.0")) { + options.service.node.capabilities.rpc.push("SINGLE_NODE_MULTI_WRITER"); + /** + * This is for volumes that support a mount time gid such as smb or fat + */ + //options.service.node.capabilities.rpc.push("VOLUME_MOUNT_GROUP"); // in k8s is sent in as the security context fsgroup + } } } @@ -219,6 +242,8 @@ class ControllerZfsSshBaseDriver extends CsiBaseDriver { ![ "UNKNOWN", "SINGLE_NODE_WRITER", + "SINGLE_NODE_SINGLE_WRITER", // added in v1.5.0 + "SINGLE_NODE_MULTI_WRITER", // added in v1.5.0 "SINGLE_NODE_READER_ONLY", "MULTI_NODE_READER_ONLY", "MULTI_NODE_SINGLE_WRITER", @@ -247,6 +272,8 @@ class ControllerZfsSshBaseDriver extends CsiBaseDriver { ![ "UNKNOWN", "SINGLE_NODE_WRITER", + "SINGLE_NODE_SINGLE_WRITER", // added in v1.5.0 + "SINGLE_NODE_MULTI_WRITER", // added in v1.5.0 "SINGLE_NODE_READER_ONLY", "MULTI_NODE_READER_ONLY", "MULTI_NODE_SINGLE_WRITER", diff --git a/src/driver/freenas/api.js b/src/driver/freenas/api.js index 5da068b..ba77af4 100644 --- a/src/driver/freenas/api.js +++ b/src/driver/freenas/api.js @@ -95,9 +95,20 @@ class FreeNASApiDriver extends CsiBaseDriver { "CLONE_VOLUME", //"PUBLISH_READONLY", "EXPAND_VOLUME", - //"VOLUME_CONDITION", // added in v1.3.0 - //"GET_VOLUME", // added in v1.3.0 ]; + + if (semver.satisfies(this.ctx.csiVersion, ">=1.3.0")) { + options.service.controller.capabilities.rpc.push( + //"VOLUME_CONDITION", + "GET_VOLUME" + ); + } + + if (semver.satisfies(this.ctx.csiVersion, ">=1.5.0")) { + options.service.controller.capabilities.rpc.push( + "SINGLE_NODE_MULTI_WRITER" + ); + } } if (!("rpc" in options.service.node.capabilities)) { @@ -110,7 +121,6 @@ class FreeNASApiDriver extends CsiBaseDriver { "STAGE_UNSTAGE_VOLUME", "GET_VOLUME_STATS", //"EXPAND_VOLUME", - //"VOLUME_CONDITION", ]; break; case "volume": @@ -119,10 +129,21 @@ class 
FreeNASApiDriver extends CsiBaseDriver { "STAGE_UNSTAGE_VOLUME", "GET_VOLUME_STATS", "EXPAND_VOLUME", - //"VOLUME_CONDITION", ]; break; } + + if (semver.satisfies(this.ctx.csiVersion, ">=1.3.0")) { + //options.service.node.capabilities.rpc.push("VOLUME_CONDITION"); + } + + if (semver.satisfies(this.ctx.csiVersion, ">=1.5.0")) { + options.service.node.capabilities.rpc.push("SINGLE_NODE_MULTI_WRITER"); + /** + * This is for volumes that support a mount time gid such as smb or fat + */ + //options.service.node.capabilities.rpc.push("VOLUME_MOUNT_GROUP"); + } } } @@ -1582,6 +1603,7 @@ class FreeNASApiDriver extends CsiBaseDriver { async removeSnapshotsFromDatatset(datasetName, options = {}) { // TODO: alter the logic here to not be n+1 + // https://jira.ixsystems.com/browse/NAS-111708 const httpClient = await this.getHttpClient(); const httpApiClient = await this.getTrueNASHttpApiClient(); @@ -1604,7 +1626,15 @@ class FreeNASApiDriver extends CsiBaseDriver { throw new Error("unhandled statusCode: " + response.statusCode); } + /** + * Hypothetically this isn't needed. The middleware is supposed to reload stuff as appropriate. 
+ * + * @param {*} call + * @param {*} datasetName + * @returns + */ async expandVolume(call, datasetName) { + // TODO: fix me return; const driverShareType = this.getDriverShareType(); const sshClient = this.getSshClient(); @@ -1844,6 +1874,8 @@ class FreeNASApiDriver extends CsiBaseDriver { ![ "UNKNOWN", "SINGLE_NODE_WRITER", + "SINGLE_NODE_SINGLE_WRITER", // added in v1.5.0 + "SINGLE_NODE_MULTI_WRITER", // added in v1.5.0 "SINGLE_NODE_READER_ONLY", "MULTI_NODE_READER_ONLY", "MULTI_NODE_SINGLE_WRITER", @@ -1872,6 +1904,8 @@ class FreeNASApiDriver extends CsiBaseDriver { ![ "UNKNOWN", "SINGLE_NODE_WRITER", + "SINGLE_NODE_SINGLE_WRITER", // added in v1.5.0 + "SINGLE_NODE_MULTI_WRITER", // added in v1.5.0 "SINGLE_NODE_READER_ONLY", "MULTI_NODE_READER_ONLY", "MULTI_NODE_SINGLE_WRITER", @@ -2140,6 +2174,7 @@ class FreeNASApiDriver extends CsiBaseDriver { retention_policy: "NONE", readonly: "IGNORE", properties: false, + only_from_scratch: true, }); let job_id = response; @@ -2155,16 +2190,21 @@ class FreeNASApiDriver extends CsiBaseDriver { await sleep(3000); } + job.error = job.error || ""; + switch (job.state) { case "SUCCESS": break; case "FAILED": - // TODO: handle scenarios where the dataset - break; case "ABORTED": - // TODO: handle this - break; default: + //[EFAULT] Target dataset 'tank/.../clone-test' already exists. 
+ if (!job.error.includes("already exists")) { + throw new GrpcError( + grpc.status.UNKNOWN, + `failed to run replication task (${job.state}): ${job.error}` + ); + } break; } @@ -2287,6 +2327,7 @@ class FreeNASApiDriver extends CsiBaseDriver { retention_policy: "NONE", readonly: "IGNORE", properties: false, + only_from_scratch: true, }); let job_id = response; @@ -2302,16 +2343,21 @@ class FreeNASApiDriver extends CsiBaseDriver { await sleep(3000); } + job.error = job.error || ""; + switch (job.state) { case "SUCCESS": break; case "FAILED": - // TODO: handle scenarios where the dataset - break; case "ABORTED": - // TODO: handle this - break; default: + //[EFAULT] Target dataset 'tank/.../clone-test' already exists. + if (!job.error.includes("already exists")) { + throw new GrpcError( + grpc.status.UNKNOWN, + `failed to run replication task (${job.state}): ${job.error}` + ); + } break; } } catch (err) { @@ -2442,9 +2488,35 @@ class FreeNASApiDriver extends CsiBaseDriver { this.options.zfs.hasOwnProperty("datasetPermissionsUser") || this.options.zfs.hasOwnProperty("datasetPermissionsGroup") ) { - // TODO: ensure the values are numbers and not strings setPerms = true; + } + + // user + if (this.options.zfs.hasOwnProperty("datasetPermissionsUser")) { + if ( + String(this.options.zfs.datasetPermissionsUser).match(/^[0-9]+$/) == + null + ) { + throw new GrpcError( + grpc.status.FAILED_PRECONDITION, + `datasetPermissionsUser must be numeric: ${this.options.zfs.datasetPermissionsUser}` + ); + } perms.uid = Number(this.options.zfs.datasetPermissionsUser); + } + + // group + if (this.options.zfs.hasOwnProperty("datasetPermissionsGroup")) { + if ( + String(this.options.zfs.datasetPermissionsGroup).match( + /^[0-9]+$/ + ) == null + ) { + throw new GrpcError( + grpc.status.FAILED_PRECONDITION, + `datasetPermissionsGroup must be numeric: ${this.options.zfs.datasetPermissionsGroup}` + ); + } perms.gid = Number(this.options.zfs.datasetPermissionsGroup); } @@ -2635,6 +2707,7 @@ 
class FreeNASApiDriver extends CsiBaseDriver { * @param {*} call */ async ControllerExpandVolume(call) { + // TODO: https://jira.ixsystems.com/browse/NAS-111707 const driver = this; const driverZfsResourceType = this.getDriverZfsResourceType(); const httpApiClient = await this.getTrueNASHttpApiClient(); @@ -3160,19 +3233,15 @@ class FreeNASApiDriver extends CsiBaseDriver { )}`; response = await httpClient.get(endpoint, { "extra.snapshots": 1, + "extra.snapshots_properties": JSON.stringify(zfsProperties), }); if (response.statusCode == 404) { throw new Error("dataset does not exist"); } else if (response.statusCode == 200) { for (let snapshot of response.body.snapshots) { - // TODO: alter the logic here to not be n+1 - let i_response = await httpApiClient.SnapshotGet( - snapshot.name, - zfsProperties - ); let row = {}; - for (let p in i_response) { - row[p] = i_response[p].rawvalue; + for (let p in snapshot.properties) { + row[p] = snapshot.properties[p].rawvalue; } rows.push(row); } @@ -3187,20 +3256,16 @@ class FreeNASApiDriver extends CsiBaseDriver { )}`; response = await httpClient.get(endpoint, { "extra.snapshots": 1, + "extra.snapshots_properties": JSON.stringify(zfsProperties), }); if (response.statusCode == 404) { throw new Error("dataset does not exist"); } else if (response.statusCode == 200) { for (let child of response.body.children) { for (let snapshot of child.snapshots) { - // TODO: alter the logic here to not be n+1 - let i_response = await httpApiClient.SnapshotGet( - snapshot.name, - zfsProperties - ); let row = {}; - for (let p in i_response) { - row[p] = i_response[p].rawvalue; + for (let p in snapshot.properties) { + row[p] = snapshot.properties[p].rawvalue; } rows.push(row); } @@ -3266,7 +3331,6 @@ class FreeNASApiDriver extends CsiBaseDriver { } else if (response.statusCode == 200) { for (let child of response.body.children) { for (let grandchild of child.children) { - // TODO: ask for full snapshot properties to be returned in the above 
endpoint to avoid the n+1 logic here let i_response = httpApiClient.normalizeProperties( grandchild, zfsProperties @@ -3543,6 +3607,7 @@ class FreeNASApiDriver extends CsiBaseDriver { retention_policy: "NONE", readonly: "IGNORE", properties: false, + only_from_scratch: true, }); let job_id = response; @@ -3555,16 +3620,21 @@ class FreeNASApiDriver extends CsiBaseDriver { await sleep(3000); } + job.error = job.error || ""; + switch (job.state) { case "SUCCESS": break; case "FAILED": - // TODO: handle scenarios where the dataset - break; case "ABORTED": - // TODO: handle this - break; default: + //[EFAULT] Target dataset 'tank/.../clone-test' already exists. + if (!job.error.includes("already exists")) { + throw new GrpcError( + grpc.status.UNKNOWN, + `failed to run replication task (${job.state}): ${job.error}` + ); + } break; } diff --git a/src/driver/index.js b/src/driver/index.js index bdf1d13..9e62d68 100644 --- a/src/driver/index.js +++ b/src/driver/index.js @@ -286,6 +286,7 @@ class CsiBaseDriver { const volume_context = call.request.volume_context; let fs_type; let mount_flags; + let volume_mount_group; const node_attach_driver = volume_context.node_attach_driver; const block_path = staging_target_path + "/block_device"; const bind_mount_flags = []; @@ -305,6 +306,15 @@ class CsiBaseDriver { mount_flags.push(normalizedSecrets.mount_flags); } mount_flags.push("defaults"); + + if ( + semver.satisfies(driver.ctx.csiVersion, ">=1.5.0") && + driver.options.service.node.capabilities.rpc.includes( + "VOLUME_MOUNT_GROUP" + ) + ) { + volume_mount_group = capability.mount.volume_mount_group; // in k8s this is derived from the fsgroup in the pod security context + } } if (call.request.volume_context.provisioner_driver == "node-manual") { @@ -832,6 +842,7 @@ class CsiBaseDriver { } async NodePublishVolume(call) { + const driver = this; const mount = new Mount(); const filesystem = new Filesystem(); let result; @@ -841,14 +852,25 @@ class CsiBaseDriver { const 
target_path = call.request.target_path; const capability = call.request.volume_capability; const access_type = capability.access_type || "mount"; + let mount_flags; + let volume_mount_group; const readonly = call.request.readonly; const volume_context = call.request.volume_context; const bind_mount_flags = []; const node_attach_driver = volume_context.node_attach_driver; if (access_type == "mount") { - let mount_flags = capability.mount.mount_flags || []; + mount_flags = capability.mount.mount_flags || []; bind_mount_flags.push(...mount_flags); + + if ( + semver.satisfies(driver.ctx.csiVersion, ">=1.5.0") && + driver.options.service.node.capabilities.rpc.includes( + "VOLUME_MOUNT_GROUP" + ) + ) { + volume_mount_group = capability.mount.volume_mount_group; // in k8s this is derived from the fsgroup in the pod security context + } } bind_mount_flags.push("defaults"); diff --git a/src/driver/node-manual/index.js b/src/driver/node-manual/index.js index d2de424..c811d70 100644 --- a/src/driver/node-manual/index.js +++ b/src/driver/node-manual/index.js @@ -1,5 +1,6 @@ const { CsiBaseDriver } = require("../index"); const { GrpcError, grpc } = require("../../utils/grpc"); +const semver = require("semver"); /** * Driver which only runs the node portion and is meant to be used entirely @@ -58,6 +59,21 @@ class NodeManualDriver extends CsiBaseDriver { //"PUBLISH_READONLY", //"EXPAND_VOLUME", ]; + + if (semver.satisfies(this.ctx.csiVersion, ">=1.3.0")) { + options.service.controller.capabilities.rpc + .push + //"VOLUME_CONDITION", + //"GET_VOLUME" + (); + } + + if (semver.satisfies(this.ctx.csiVersion, ">=1.5.0")) { + options.service.controller.capabilities.rpc + .push + //"SINGLE_NODE_MULTI_WRITER" + (); + } } if (!("rpc" in options.service.node.capabilities)) { @@ -69,6 +85,18 @@ class NodeManualDriver extends CsiBaseDriver { "GET_VOLUME_STATS", //"EXPAND_VOLUME" ]; + + if (semver.satisfies(this.ctx.csiVersion, ">=1.3.0")) { + 
//options.service.node.capabilities.rpc.push("VOLUME_CONDITION"); + } + + if (semver.satisfies(this.ctx.csiVersion, ">=1.5.0")) { + options.service.node.capabilities.rpc.push("SINGLE_NODE_MULTI_WRITER"); + /** + * This is for volumes that support a mount time gid such as smb or fat + */ + //options.service.node.capabilities.rpc.push("VOLUME_MOUNT_GROUP"); + } } } @@ -122,6 +150,8 @@ class NodeManualDriver extends CsiBaseDriver { ![ "UNKNOWN", "SINGLE_NODE_WRITER", + "SINGLE_NODE_SINGLE_WRITER", // added in v1.5.0 + "SINGLE_NODE_MULTI_WRITER", // added in v1.5.0 "SINGLE_NODE_READER_ONLY", "MULTI_NODE_READER_ONLY", "MULTI_NODE_SINGLE_WRITER", @@ -148,6 +178,8 @@ class NodeManualDriver extends CsiBaseDriver { ![ "UNKNOWN", "SINGLE_NODE_WRITER", + "SINGLE_NODE_SINGLE_WRITER", // added in v1.5.0 + "SINGLE_NODE_MULTI_WRITER", // added in v1.5.0 "SINGLE_NODE_READER_ONLY", "MULTI_NODE_READER_ONLY", "MULTI_NODE_SINGLE_WRITER", diff --git a/src/driver/zfs-local-ephemeral-inline/index.js b/src/driver/zfs-local-ephemeral-inline/index.js index 8b50cf1..5668011 100644 --- a/src/driver/zfs-local-ephemeral-inline/index.js +++ b/src/driver/zfs-local-ephemeral-inline/index.js @@ -2,6 +2,7 @@ const fs = require("fs"); const { CsiBaseDriver } = require("../index"); const { GrpcError, grpc } = require("../../utils/grpc"); const { Filesystem } = require("../../utils/filesystem"); +const semver = require("semver"); const SshClient = require("../../utils/ssh").SshClient; const { Zetabyte, ZfsSshProcessManager } = require("../../utils/zfs"); @@ -81,6 +82,21 @@ class ZfsLocalEphemeralInlineDriver extends CsiBaseDriver { //"PUBLISH_READONLY", //"EXPAND_VOLUME" ]; + + if (semver.satisfies(this.ctx.csiVersion, ">=1.3.0")) { + options.service.controller.capabilities.rpc + .push + //"VOLUME_CONDITION", + //"GET_VOLUME" + (); + } + + if (semver.satisfies(this.ctx.csiVersion, ">=1.5.0")) { + options.service.controller.capabilities.rpc + .push + //"SINGLE_NODE_MULTI_WRITER" + (); + } } if (!("rpc" 
in options.service.node.capabilities)) { @@ -91,6 +107,18 @@ class ZfsLocalEphemeralInlineDriver extends CsiBaseDriver { "GET_VOLUME_STATS", //"EXPAND_VOLUME", ]; + + if (semver.satisfies(this.ctx.csiVersion, ">=1.3.0")) { + //options.service.node.capabilities.rpc.push("VOLUME_CONDITION"); + } + + if (semver.satisfies(this.ctx.csiVersion, ">=1.5.0")) { + options.service.node.capabilities.rpc.push("SINGLE_NODE_MULTI_WRITER"); + /** + * This is for volumes that support a mount time gid such as smb or fat + */ + //options.service.node.capabilities.rpc.push("VOLUME_MOUNT_GROUP"); + } } } @@ -167,6 +195,8 @@ class ZfsLocalEphemeralInlineDriver extends CsiBaseDriver { ![ "UNKNOWN", "SINGLE_NODE_WRITER", + "SINGLE_NODE_SINGLE_WRITER", // added in v1.5.0 + "SINGLE_NODE_MULTI_WRITER", // added in v1.5.0 "SINGLE_NODE_READER_ONLY", ].includes(capability.access_mode.mode) ) { @@ -192,6 +222,8 @@ class ZfsLocalEphemeralInlineDriver extends CsiBaseDriver { ![ "UNKNOWN", "SINGLE_NODE_WRITER", + "SINGLE_NODE_SINGLE_WRITER", // added in v1.5.0 + "SINGLE_NODE_MULTI_WRITER", // added in v1.5.0 "SINGLE_NODE_READER_ONLY", ].includes(capability.access_mode.mode) ) { From e192595e6549def441694bbc8000a0870e34f94c Mon Sep 17 00:00:00 2001 From: Travis Glenn Hansen Date: Tue, 10 Aug 2021 12:04:39 -0600 Subject: [PATCH 31/44] dep bump Signed-off-by: Travis Glenn Hansen --- package-lock.json | 74 +++++++++++++++++++++++++---------------------- 1 file changed, 39 insertions(+), 35 deletions(-) diff --git a/package-lock.json b/package-lock.json index ef2600e..f9280ae 100644 --- a/package-lock.json +++ b/package-lock.json @@ -185,9 +185,9 @@ } }, "node_modules/@grpc/grpc-js": { - "version": "1.3.6", - "resolved": "https://registry.npmjs.org/@grpc/grpc-js/-/grpc-js-1.3.6.tgz", - "integrity": "sha512-v7+LQFbqZKmd/Tvf5/j1Xlbq6jXL/4d+gUtm2TNX4QiEC3ELWADmGr2dGlUyLl6aKTuYfsN72vAsO5zmavYkEg==", + "version": "1.3.7", + "resolved": "https://registry.npmjs.org/@grpc/grpc-js/-/grpc-js-1.3.7.tgz", + 
"integrity": "sha512-CKQVuwuSPh40tgOkR7c0ZisxYRiN05PcKPW72mQL5y++qd7CwBRoaJZvU5xfXnCJDFBmS3qZGQ71Frx6Ofo2XA==", "dependencies": { "@types/node": ">=12.12.47" }, @@ -454,8 +454,7 @@ "node_modules/balanced-match": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz", - "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==", - "devOptional": true + "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==" }, "node_modules/bcrypt-pbkdf": { "version": "1.0.2", @@ -469,7 +468,6 @@ "version": "1.1.11", "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", - "devOptional": true, "dependencies": { "balanced-match": "^1.0.0", "concat-map": "0.0.1" @@ -559,6 +557,14 @@ "wrap-ansi": "^7.0.0" } }, + "node_modules/code-point-at": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/code-point-at/-/code-point-at-1.1.0.tgz", + "integrity": "sha1-DQcLTQQ6W+ozovGkDi7bPZpMz3c=", + "engines": { + "node": ">=0.10.0" + } + }, "node_modules/color": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/color/-/color-3.0.0.tgz", @@ -645,8 +651,7 @@ "node_modules/concat-map": { "version": "0.0.1", "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz", - "integrity": "sha1-2Klr13/Wjfd5OnMDajug1UBdR3s=", - "devOptional": true + "integrity": "sha1-2Klr13/Wjfd5OnMDajug1UBdR3s=" }, "node_modules/core-util-is": { "version": "1.0.2", @@ -1126,8 +1131,7 @@ "node_modules/fs.realpath": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz", - "integrity": "sha1-FQStJSMVjKpA20onh8sBQRmU6k8=", - "dev": true + "integrity": "sha1-FQStJSMVjKpA20onh8sBQRmU6k8=" }, "node_modules/functional-red-black-tree": { "version": 
"1.0.1", @@ -1155,7 +1159,6 @@ "version": "7.1.7", "resolved": "https://registry.npmjs.org/glob/-/glob-7.1.7.tgz", "integrity": "sha512-OvD9ENzPLbegENnYP5UUfJIirTg4+XwMWGaQfQTY0JenxNvvIKP3U3/tAQSPIu/lHxXYSZmpXlUHeqAIdKzBLQ==", - "dev": true, "dependencies": { "fs.realpath": "^1.0.0", "inflight": "^1.0.4", @@ -1929,7 +1932,6 @@ "version": "1.0.6", "resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz", "integrity": "sha1-Sb1jMdfQLQwJvJEKEHW6gWW1bfk=", - "devOptional": true, "dependencies": { "once": "^1.3.0", "wrappy": "1" @@ -2184,7 +2186,6 @@ "version": "3.0.4", "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.0.4.tgz", "integrity": "sha512-yJHVQEhyqPLUTgt9B83PXu6W3rx4MvvHvSUvToogpwoGDOUQ+yDrR0HRot+yOCdCO7u4hX3pWft6kWBBcqh0UA==", - "devOptional": true, "dependencies": { "brace-expansion": "^1.1.7" }, @@ -2290,6 +2291,14 @@ "resolved": "https://registry.npmjs.org/neo-async/-/neo-async-2.6.2.tgz", "integrity": "sha512-Yd3UES5mWCSqR+qNT93S3UoYUkqAZ9lLg8a7g9rimsWmYGK8cVToA4/sF3RrshdyV3sAGMXVUmpMYOw+dLpOuw==" }, + "node_modules/number-is-nan": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/number-is-nan/-/number-is-nan-1.0.1.tgz", + "integrity": "sha1-CXtgK1NCKlIsGvuHkDGDNpQaAR0=", + "engines": { + "node": ">=0.10.0" + } + }, "node_modules/oauth-sign": { "version": "0.9.0", "resolved": "https://registry.npmjs.org/oauth-sign/-/oauth-sign-0.9.0.tgz", @@ -2302,7 +2311,6 @@ "version": "1.4.0", "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz", "integrity": "sha1-WDsap3WWHUsROsF9nFC6753Xa9E=", - "devOptional": true, "dependencies": { "wrappy": "1" } @@ -2364,7 +2372,6 @@ "version": "1.0.1", "resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz", "integrity": "sha1-F0uSaHNVNP+8es5r9TpanhtcX18=", - "devOptional": true, "engines": { "node": ">=0.10.0" } @@ -3048,8 +3055,7 @@ "node_modules/wrappy": { "version": "1.0.2", "resolved": 
"https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", - "integrity": "sha1-tSQ9jz7BqjXxNkYFvA0QNuMKtp8=", - "devOptional": true + "integrity": "sha1-tSQ9jz7BqjXxNkYFvA0QNuMKtp8=" }, "node_modules/y18n": { "version": "5.0.8", @@ -3224,9 +3230,9 @@ } }, "@grpc/grpc-js": { - "version": "1.3.6", - "resolved": "https://registry.npmjs.org/@grpc/grpc-js/-/grpc-js-1.3.6.tgz", - "integrity": "sha512-v7+LQFbqZKmd/Tvf5/j1Xlbq6jXL/4d+gUtm2TNX4QiEC3ELWADmGr2dGlUyLl6aKTuYfsN72vAsO5zmavYkEg==", + "version": "1.3.7", + "resolved": "https://registry.npmjs.org/@grpc/grpc-js/-/grpc-js-1.3.7.tgz", + "integrity": "sha512-CKQVuwuSPh40tgOkR7c0ZisxYRiN05PcKPW72mQL5y++qd7CwBRoaJZvU5xfXnCJDFBmS3qZGQ71Frx6Ofo2XA==", "requires": { "@types/node": ">=12.12.47" } @@ -3447,8 +3453,7 @@ "balanced-match": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz", - "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==", - "devOptional": true + "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==" }, "bcrypt-pbkdf": { "version": "1.0.2", @@ -3462,7 +3467,6 @@ "version": "1.1.11", "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", - "devOptional": true, "requires": { "balanced-match": "^1.0.0", "concat-map": "0.0.1" @@ -3530,6 +3534,10 @@ "wrap-ansi": "^7.0.0" } }, + "code-point-at": { + "version": "https://registry.npmjs.org/code-point-at/-/code-point-at-1.1.0.tgz", + "integrity": "sha1-DQcLTQQ6W+ozovGkDi7bPZpMz3c=" + }, "color": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/color/-/color-3.0.0.tgz", @@ -3606,8 +3614,7 @@ "concat-map": { "version": "0.0.1", "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz", - "integrity": 
"sha1-2Klr13/Wjfd5OnMDajug1UBdR3s=", - "devOptional": true + "integrity": "sha1-2Klr13/Wjfd5OnMDajug1UBdR3s=" }, "core-util-is": { "version": "1.0.2", @@ -3981,8 +3988,7 @@ "fs.realpath": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz", - "integrity": "sha1-FQStJSMVjKpA20onh8sBQRmU6k8=", - "dev": true + "integrity": "sha1-FQStJSMVjKpA20onh8sBQRmU6k8=" }, "functional-red-black-tree": { "version": "1.0.1", @@ -4007,7 +4013,6 @@ "version": "7.1.7", "resolved": "https://registry.npmjs.org/glob/-/glob-7.1.7.tgz", "integrity": "sha512-OvD9ENzPLbegENnYP5UUfJIirTg4+XwMWGaQfQTY0JenxNvvIKP3U3/tAQSPIu/lHxXYSZmpXlUHeqAIdKzBLQ==", - "dev": true, "requires": { "fs.realpath": "^1.0.0", "inflight": "^1.0.4", @@ -4571,7 +4576,6 @@ "version": "1.0.6", "resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz", "integrity": "sha1-Sb1jMdfQLQwJvJEKEHW6gWW1bfk=", - "devOptional": true, "requires": { "once": "^1.3.0", "wrappy": "1" @@ -4787,7 +4791,6 @@ "version": "3.0.4", "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.0.4.tgz", "integrity": "sha512-yJHVQEhyqPLUTgt9B83PXu6W3rx4MvvHvSUvToogpwoGDOUQ+yDrR0HRot+yOCdCO7u4hX3pWft6kWBBcqh0UA==", - "devOptional": true, "requires": { "brace-expansion": "^1.1.7" } @@ -4874,6 +4877,10 @@ "resolved": "https://registry.npmjs.org/neo-async/-/neo-async-2.6.2.tgz", "integrity": "sha512-Yd3UES5mWCSqR+qNT93S3UoYUkqAZ9lLg8a7g9rimsWmYGK8cVToA4/sF3RrshdyV3sAGMXVUmpMYOw+dLpOuw==" }, + "number-is-nan": { + "version": "https://registry.npmjs.org/number-is-nan/-/number-is-nan-1.0.1.tgz", + "integrity": "sha1-CXtgK1NCKlIsGvuHkDGDNpQaAR0=" + }, "oauth-sign": { "version": "0.9.0", "resolved": "https://registry.npmjs.org/oauth-sign/-/oauth-sign-0.9.0.tgz", @@ -4883,7 +4890,6 @@ "version": "1.4.0", "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz", "integrity": "sha1-WDsap3WWHUsROsF9nFC6753Xa9E=", - "devOptional": true, "requires": { "wrappy": "1" } @@ -4935,8 +4941,7 @@ 
"path-is-absolute": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz", - "integrity": "sha1-F0uSaHNVNP+8es5r9TpanhtcX18=", - "devOptional": true + "integrity": "sha1-F0uSaHNVNP+8es5r9TpanhtcX18=" }, "path-key": { "version": "3.1.1", @@ -5443,8 +5448,7 @@ "wrappy": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", - "integrity": "sha1-tSQ9jz7BqjXxNkYFvA0QNuMKtp8=", - "devOptional": true + "integrity": "sha1-tSQ9jz7BqjXxNkYFvA0QNuMKtp8=" }, "y18n": { "version": "5.0.8", From 61a550c1fec8d33664f8df57abf77459c3c36a1c Mon Sep 17 00:00:00 2001 From: Travis Glenn Hansen Date: Tue, 10 Aug 2021 20:52:35 -0600 Subject: [PATCH 32/44] rename config option from Attributes to Template Signed-off-by: Travis Glenn Hansen --- src/driver/controller-synology/index.js | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/driver/controller-synology/index.js b/src/driver/controller-synology/index.js index f3c51d5..13adb4e 100644 --- a/src/driver/controller-synology/index.js +++ b/src/driver/controller-synology/index.js @@ -361,7 +361,7 @@ class ControllerSynologyDriver extends CsiBaseDriver { } } else { // create lun - data = Object.assign({}, driver.options.iscsi.lunAttributes, { + data = Object.assign({}, driver.options.iscsi.lunTemplate, { name: iscsiName, location: driver.options.synology.volume, size: capacity_bytes, @@ -371,7 +371,7 @@ class ControllerSynologyDriver extends CsiBaseDriver { // create target let iqn = driver.options.iscsi.baseiqn + iscsiName; - data = Object.assign({}, driver.options.iscsi.targetAttributes, { + data = Object.assign({}, driver.options.iscsi.targetTemplate, { name: iscsiName, iqn, }); From 1a5b2e699a9e6fd36b5cb290f309256fe2d0fcd7 Mon Sep 17 00:00:00 2001 From: Travis Glenn Hansen Date: Wed, 11 Aug 2021 09:21:06 -0600 Subject: [PATCH 33/44] rename attributes to template for snapshots Signed-off-by: Travis Glenn Hansen --- 
src/driver/controller-synology/index.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/driver/controller-synology/index.js b/src/driver/controller-synology/index.js index 13adb4e..6efe715 100644 --- a/src/driver/controller-synology/index.js +++ b/src/driver/controller-synology/index.js @@ -774,7 +774,7 @@ class ControllerSynologyDriver extends CsiBaseDriver { }; } - let data = Object.assign({}, driver.options.iscsi.lunSnapshotAttributes, { + let data = Object.assign({}, driver.options.iscsi.lunSnapshotTemplate, { src_lun_uuid: lun.uuid, taken_by: "democratic-csi", description: name, //check From 023f7e16b2be2d8c43ef21ee8488aebf6a71956b Mon Sep 17 00:00:00 2001 From: Travis Glenn Hansen Date: Sat, 14 Aug 2021 13:58:39 -0600 Subject: [PATCH 34/44] more performant proccess for removing all snapshots on a dataset Signed-off-by: Travis Glenn Hansen --- src/driver/freenas/api.js | 34 ++++------------------- src/driver/freenas/http/api.js | 50 ++++++++++++++++++++++++++++++++++ 2 files changed, 55 insertions(+), 29 deletions(-) diff --git a/src/driver/freenas/api.js b/src/driver/freenas/api.js index ba77af4..75b7acb 100644 --- a/src/driver/freenas/api.js +++ b/src/driver/freenas/api.js @@ -1601,29 +1601,10 @@ class FreeNASApiDriver extends CsiBaseDriver { } } - async removeSnapshotsFromDatatset(datasetName, options = {}) { - // TODO: alter the logic here to not be n+1 - // https://jira.ixsystems.com/browse/NAS-111708 - const httpClient = await this.getHttpClient(); + async removeSnapshotsFromDatatset(datasetName) { const httpApiClient = await this.getTrueNASHttpApiClient(); - - let response; - let endpoint = `/pool/dataset/id/${encodeURIComponent(datasetName)}`; - response = await httpClient.get(endpoint, { "extra.snapshots": 1 }); - - //console.log(response); - - if (response.statusCode == 404) { - return; - } - if (response.statusCode == 200) { - for (let snapshot of response.body.snapshots) { - await httpApiClient.SnapshotDelete(snapshot.name); - 
} - return; - } - - throw new Error("unhandled statusCode: " + response.statusCode); + let job_id = await httpApiClient.DatasetDestroySnapshots(datasetName); + await httpApiClient.CoreWaitForJob(job_id, 30); } /** @@ -2224,9 +2205,7 @@ class FreeNASApiDriver extends CsiBaseDriver { } // remove snapshots from target - await this.removeSnapshotsFromDatatset(datasetName, { - force: true, - }); + await this.removeSnapshotsFromDatatset(datasetName); } else { try { response = await httpApiClient.CloneCreate( @@ -2377,9 +2356,7 @@ class FreeNASApiDriver extends CsiBaseDriver { ); // remove snapshots from target - await this.removeSnapshotsFromDatatset(datasetName, { - force: true, - }); + await this.removeSnapshotsFromDatatset(datasetName); // remove snapshot from source await httpApiClient.SnapshotDelete(fullSnapshotName, { @@ -2707,7 +2684,6 @@ class FreeNASApiDriver extends CsiBaseDriver { * @param {*} call */ async ControllerExpandVolume(call) { - // TODO: https://jira.ixsystems.com/browse/NAS-111707 const driver = this; const driverZfsResourceType = this.getDriverZfsResourceType(); const httpApiClient = await this.getTrueNASHttpApiClient(); diff --git a/src/driver/freenas/http/api.js b/src/driver/freenas/http/api.js index 8607697..1832928 100644 --- a/src/driver/freenas/http/api.js +++ b/src/driver/freenas/http/api.js @@ -1,3 +1,4 @@ +const { sleep } = require("../../../utils/general"); const { Zetabyte } = require("../../../utils/zfs"); // used for in-memory cache of the version info @@ -491,6 +492,30 @@ class Api { throw new Error(JSON.stringify(response.body)); } + async DatasetDestroySnapshots(datasetName, data = {}) { + const httpClient = await this.getHttpClient(false); + let response; + let endpoint; + + data.name = datasetName; + + endpoint = "/pool/dataset/destroy_snapshots"; + response = await httpClient.post(endpoint, data); + + if (response.statusCode == 200) { + return response.body; + } + + if ( + response.statusCode == 422 && + 
JSON.stringify(response.body).includes("already exists") + ) { + return; + } + + throw new Error(JSON.stringify(response.body)); + } + async SnapshotSet(snapshotName, properties) { const httpClient = await this.getHttpClient(false); let response; @@ -654,6 +679,31 @@ class Api { throw new Error(JSON.stringify(response.body)); } + async CoreWaitForJob(job_id, timeout = 0) { + if (!job_id) { + throw new Error("invalid job_id"); + } + + const startTime = Date.now() / 1000; + let currentTime; + + let job; + + // wait for job to finish + while (!job || !["SUCCESS", "ABORTED", "FAILED"].includes(job.state)) { + job = await this.CoreGetJobs({ id: job_id }); + job = job[0]; + await sleep(3000); + + currentTime = Date.now() / 1000; + if (timeout > 0 && currentTime > startTime + timeout) { + throw new Error("timeout waiting for job to complete"); + } + } + + return job; + } + async CoreGetJobs(data) { const httpClient = await this.getHttpClient(false); From 6c08679042f1c49900c688c25fa9e8f0af092a93 Mon Sep 17 00:00:00 2001 From: Travis Glenn Hansen Date: Sat, 14 Aug 2021 14:20:14 -0600 Subject: [PATCH 35/44] convert bools to strings for synology api Signed-off-by: Travis Glenn Hansen --- src/driver/controller-synology/http/index.js | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/src/driver/controller-synology/http/index.js b/src/driver/controller-synology/http/index.js index 0e7ea50..9693024 100644 --- a/src/driver/controller-synology/http/index.js +++ b/src/driver/controller-synology/http/index.js @@ -91,7 +91,7 @@ class SynologyHttpClient { case "GET": let qsData = JSON.parse(JSON.stringify(data)); for (let p in qsData) { - if (Array.isArray(qsData[p])) { + if (Array.isArray(qsData[p]) || typeof qsData[p] == "boolean") { qsData[p] = JSON.stringify(qsData[p]); } } @@ -486,7 +486,10 @@ class SynologyHttpClient { return await this.do_request( "GET", "entry.cgi", - Object.assign({}, iscsi_lun_extend, { uuid: JSON.stringify(uuid), new_size: size }) + 
Object.assign({}, iscsi_lun_extend, { + uuid: JSON.stringify(uuid), + new_size: size, + }) ); } From 339c53c93b3841343bf3a7b5918c24db2d1681ca Mon Sep 17 00:00:00 2001 From: Travis Glenn Hansen Date: Mon, 30 Aug 2021 06:59:11 -0600 Subject: [PATCH 36/44] attempt to gracefully handle fs expand failures during staging (#85) Signed-off-by: Travis Glenn Hansen --- src/driver/index.js | 28 +++++++++++++++++++++++++++- src/utils/filesystem.js | 7 ++++--- 2 files changed, 31 insertions(+), 4 deletions(-) diff --git a/src/driver/index.js b/src/driver/index.js index 9e62d68..cff19b6 100644 --- a/src/driver/index.js +++ b/src/driver/index.js @@ -571,7 +571,33 @@ class CsiBaseDriver { case "ext3": case "ext4dev": //await filesystem.checkFilesystem(device, fs_info.type); - await filesystem.expandFilesystem(device, fs_type); + try { + await filesystem.expandFilesystem(device, fs_type); + } catch (err) { + // mount is clean and rw, but it will not expand until clean umount has been done + // failed to execute filesystem command: resize2fs /dev/sda, response: {"code":1,"stdout":"Couldn't find valid filesystem superblock.\n","stderr":"resize2fs 1.44.5 (15-Dec-2018)\nresize2fs: Superblock checksum does not match superblock while trying to open /dev/sda\n"} + // /dev/sda on /var/lib/kubelet/plugins/kubernetes.io/csi/pv/pvc-4a80757e-5e87-475d-826f-44fcc4719348/globalmount type ext4 (rw,relatime,stripe=256) + if ( + err.code == 1 && + err.stdout.includes("find valid filesystem superblock") && + err.stderr.includes("checksum does not match superblock") + ) { + driver.ctx.logger.warn( + `successful mount, unsuccessful fs resize: attempting abnormal umount/mount/resize2fs to clear things up ${staging_target_path} (${device})` + ); + + // try an unmount/mount/fsck cycle again just to clean things up + await mount.umount(staging_target_path, []); + await mount.mount( + device, + staging_target_path, + ["-t", fs_type].concat(["-o", mount_flags.join(",")]) + ); + await 
filesystem.expandFilesystem(device, fs_type); + } else { + throw err; + } + } break; case "xfs": //await filesystem.checkFilesystem(device, fs_info.type); diff --git a/src/utils/filesystem.js b/src/utils/filesystem.js index 53d8d99..4937863 100644 --- a/src/utils/filesystem.js +++ b/src/utils/filesystem.js @@ -426,12 +426,13 @@ class Filesystem { // echo 1 > /sys/block/sdb/device/rescan const sys_file = `/sys/block/${device_name}/device/rescan`; + console.log(`executing filesystem command: echo 1 > ${sys_file}`); fs.writeFileSync(sys_file, "1"); } } /** - * expand a give filesystem + * expand a given filesystem * * @param {*} device * @param {*} fstype @@ -474,7 +475,7 @@ class Filesystem { } /** - * expand a give filesystem + * check a given filesystem * * fsck [options] -- [fs-options] [ ...] * @@ -593,7 +594,7 @@ class Filesystem { args.unshift(command); command = filesystem.options.paths.sudo; } - console.log("executing fileystem command: %s %s", command, args.join(" ")); + console.log("executing filesystem command: %s %s", command, args.join(" ")); const child = filesystem.options.executor.spawn(command, args, options); let didTimeout = false; From aa3f5a5484e74b2e635d53bac5fde9e62e6902b8 Mon Sep 17 00:00:00 2001 From: Travis Glenn Hansen Date: Wed, 1 Sep 2021 21:42:31 -0600 Subject: [PATCH 37/44] foundations for more flexible iscsi connections Signed-off-by: Travis Glenn Hansen --- src/driver/index.js | 60 +++++++++++++++++++++++---- src/utils/filesystem.js | 5 +++ src/utils/mount.js | 91 ++++++++++++++++++++++++++++++++++++++++- 3 files changed, 146 insertions(+), 10 deletions(-) diff --git a/src/driver/index.js b/src/driver/index.js index cff19b6..c4f3316 100644 --- a/src/driver/index.js +++ b/src/driver/index.js @@ -298,6 +298,14 @@ class CsiBaseDriver { call.request.volume_context.provisioner_driver_instance_id ); + /* + let mount_options = await mount.getMountOptions(staging_target_path); + console.log(mount_options); + console.log(await 
mount.getMountOptionValue(mount_options, "stripe")); + console.log(await mount.getMountOptionPresent(mount_options, "stripee")); + throw new Error("foobar"); + */ + if (access_type == "mount") { fs_type = capability.mount.fs_type; mount_flags = capability.mount.mount_flags || []; @@ -307,6 +315,10 @@ class CsiBaseDriver { } mount_flags.push("defaults"); + // https://github.com/karelzak/util-linux/issues/1429 + //mount_flags.push("x-democratic-csi.managed"); + //mount_flags.push("x-democratic-csi.staged"); + if ( semver.satisfies(driver.ctx.csiVersion, ">=1.5.0") && driver.options.service.node.capabilities.rpc.includes( @@ -373,9 +385,27 @@ class CsiBaseDriver { // ensure unique entries only portals = [...new Set(portals)]; + // stores actual device paths after iscsi login let iscsiDevices = []; + // stores configuration of targets/iqn/luns to connect to + let iscsiConnections = []; for (let portal of portals) { + iscsiConnections.push({ + portal, + iqn: volume_context.iqn, + lun: volume_context.lun, + }); + } + + /** + * TODO: allow sending in iscsiConnection in a raw/manual format + * TODO: allow option to determine if send_targets should be invoked + * TODO: allow option to control whether nodedb entry should be created by driver + * TODO: allow option to control whether nodedb entry should be deleted by driver + */ + + for (let iscsiConnection of iscsiConnections) { // create DB entry // https://library.netapp.com/ecmdocs/ECMP1654943/html/GUID-8EC685B4-8CB6-40D8-A8D5-031A3899BCDC.html // put these options in place to force targets managed by csi to be explicitly attached (in the case of unclearn shutdown etc) @@ -391,24 +421,27 @@ class CsiBaseDriver { } } await iscsi.iscsiadm.createNodeDBEntry( - volume_context.iqn, - portal, + iscsiConnection.iqn, + iscsiConnection.portal, nodeDB ); // login - await iscsi.iscsiadm.login(volume_context.iqn, portal); + await iscsi.iscsiadm.login( + iscsiConnection.iqn, + iscsiConnection.portal + ); // get associated session let 
session = await iscsi.iscsiadm.getSession( - volume_context.iqn, - portal + iscsiConnection.iqn, + iscsiConnection.portal ); // rescan in scenarios when login previously occurred but volumes never appeared await iscsi.iscsiadm.rescanSession(session); // find device name - device = `/dev/disk/by-path/ip-${portal}-iscsi-${volume_context.iqn}-lun-${volume_context.lun}`; + device = `/dev/disk/by-path/ip-${iscsiConnection.portal}-iscsi-${iscsiConnection.iqn}-lun-${iscsiConnection.lun}`; let deviceByPath = device; // can take some time for device to show up, loop for some period @@ -439,7 +472,7 @@ class CsiBaseDriver { iscsiDevices.push(device); driver.ctx.logger.info( - `successfully logged into portal ${portal} and created device ${deviceByPath} with realpath ${device}` + `successfully logged into portal ${iscsiConnection.portal} and created device ${deviceByPath} with realpath ${device}` ); } } @@ -461,7 +494,7 @@ class CsiBaseDriver { ); } - if (iscsiDevices.length != portals.length) { + if (iscsiDevices.length != iscsiConnections.length) { driver.ctx.logger.warn( `failed to attach all iscsi devices/targets/portals` ); @@ -484,7 +517,8 @@ class CsiBaseDriver { iscsiDevices.includes(value) ); - const useMultipath = portals.length > 1 || commonDevices.length > 0; + const useMultipath = + iscsiConnections.length > 1 || commonDevices.length > 0; // discover multipath device to use if (useMultipath) { @@ -679,6 +713,8 @@ class CsiBaseDriver { //result = await mount.pathIsMounted(block_path); //result = await mount.pathIsMounted(staging_target_path) + // TODO: use the x-* mount options to detect if we should delete target + try { result = await mount.pathIsMounted(block_path); } catch (err) { @@ -900,7 +936,13 @@ class CsiBaseDriver { } bind_mount_flags.push("defaults"); + + // https://github.com/karelzak/util-linux/issues/1429 + //bind_mount_flags.push("x-democratic-csi.managed"); + //bind_mount_flags.push("x-democratic-csi.published"); + if (readonly) 
bind_mount_flags.push("ro"); + // , "x-democratic-csi.ro" switch (node_attach_driver) { case "nfs": diff --git a/src/utils/filesystem.js b/src/utils/filesystem.js index 4937863..724f25e 100644 --- a/src/utils/filesystem.js +++ b/src/utils/filesystem.js @@ -326,9 +326,14 @@ class Filesystem { try { result = await filesystem.exec("blkid", args); } catch (err) { + if (err.code == 2 && err.stderr.includes("No such device or address")) { + throw err; + } + if (err.code == 2) { return false; } + throw err; } diff --git a/src/utils/mount.js b/src/utils/mount.js index 5ad935e..187c1d4 100644 --- a/src/utils/mount.js +++ b/src/utils/mount.js @@ -145,7 +145,7 @@ class Mount { * * @param {*} path */ - async getMountDetails(path, extraOutputProperties = []) { + async getMountDetails(path, extraOutputProperties = [], extraArgs = []) { const mount = this; let args = []; const common_options = JSON.parse(JSON.stringify(FINDMNT_COMMON_OPTIONS)); @@ -156,6 +156,7 @@ class Mount { args = args.concat(["--mountpoint", path]); args = args.concat(common_options); + args = args.concat(extraArgs); let result; try { @@ -167,6 +168,94 @@ class Mount { } } + /** + * parse a mount options string into an array + * + * @param {*} options + * @returns + */ + async parseMountOptions(options) { + if (!options) { + return []; + } + + if (Array.isArray(options)) { + return options; + } + + options = options.split(","); + return options; + } + + /** + * Given the set of mount options and sought after option, return true if the option is present + * + * @param {*} options + * @param {*} option + * @returns + */ + async getMountOptionPresent(options, option) { + const mount = this; + + if (!Array.isArray(options)) { + options = await mount.parseMountOptions(options); + } + + for (let i of options) { + let parts = i.split("=", 2); + if (parts[0] == option) { + return true; + } + } + + return false; + } + + /** + * Get the value of the given mount option + * + * if the mount option is present by has no 
value null is returned + * if the mount option is NOT present undefined is returned + * is the mount option has a value that value is returned + * + * @param {*} options + * @param {*} option + * @returns + */ + async getMountOptionValue(options, option) { + const mount = this; + + if (!Array.isArray(options)) { + options = await mount.parseMountOptions(options); + } + + for (let i of options) { + let parts = i.split("=", 2); + if (parts[0] == option) { + if (typeof parts[1] === "undefined") { + return null; + } else { + return parts[1]; + } + } + } + + return undefined; + } + + /** + * Get mount optsion for a given path + * + * @param {*} path + * @returns Array + */ + async getMountOptions(path) { + const mount = this; + let details = await mount.getMountDetails(path, [], ["-m"]); + + return await mount.parseMountOptions(details.options); + } + /** * Get the device (source) at the given mount point * From 8aeaa5cc7868d305b4a8816ba9053384203e2360 Mon Sep 17 00:00:00 2001 From: Travis Glenn Hansen Date: Wed, 1 Sep 2021 21:42:49 -0600 Subject: [PATCH 38/44] build images for both docker hub and ghcr Signed-off-by: Travis Glenn Hansen --- .github/bin/docker-release.sh | 13 +++++++++---- .github/workflows/main.yml | 2 ++ 2 files changed, 11 insertions(+), 4 deletions(-) diff --git a/.github/bin/docker-release.sh b/.github/bin/docker-release.sh index 726698f..f9686c1 100755 --- a/.github/bin/docker-release.sh +++ b/.github/bin/docker-release.sh @@ -1,11 +1,16 @@ #!/bin/bash -echo "$DOCKER_PASSWORD" | docker login -u "$DOCKER_USERNAME" --password-stdin +echo "$DOCKER_PASSWORD" | docker login -u "$DOCKER_USERNAME" --password-stdin +echo "$GHCR_PASSWORD" | docker login ghcr.io -u "$GHCR_USERNAME" --password-stdin export DOCKER_ORG="democraticcsi" export DOCKER_PROJECT="democratic-csi" export DOCKER_REPO="${DOCKER_ORG}/${DOCKER_PROJECT}" +export GHCR_ORG="democratic-csi" +export GHCR_PROJECT="democratic-csi" +export GHCR_REPO="ghcr.io/${GHCR_ORG}/${GHCR_PROJECT}" + if [[ 
$GITHUB_REF == refs/tags/* ]]; then export GIT_TAG=${GITHUB_REF#refs/tags/} else @@ -13,12 +18,12 @@ else fi if [[ -n "${GIT_TAG}" ]]; then - docker buildx build --progress plain --pull --push --platform "${DOCKER_BUILD_PLATFORM}" -t ${DOCKER_REPO}:${GIT_TAG} . + docker buildx build --progress plain --pull --push --platform "${DOCKER_BUILD_PLATFORM}" -t ${DOCKER_REPO}:${GIT_TAG} -t ${GHCR_REPO}:${GIT_TAG} . elif [[ -n "${GIT_BRANCH}" ]]; then if [[ "${GIT_BRANCH}" == "master" ]]; then - docker buildx build --progress plain --pull --push --platform "${DOCKER_BUILD_PLATFORM}" -t ${DOCKER_REPO}:latest . + docker buildx build --progress plain --pull --push --platform "${DOCKER_BUILD_PLATFORM}" -t ${DOCKER_REPO}:latest -t ${GHCR_REPO}:latest . else - docker buildx build --progress plain --pull --push --platform "${DOCKER_BUILD_PLATFORM}" -t ${DOCKER_REPO}:${GIT_BRANCH} . + docker buildx build --progress plain --pull --push --platform "${DOCKER_BUILD_PLATFORM}" -t ${DOCKER_REPO}:${GIT_BRANCH} -t ${GHCR_REPO}:${GIT_BRANCH} . 
fi else : diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index fd18c2a..ecda440 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -34,5 +34,7 @@ jobs: env: DOCKER_USERNAME: ${{ secrets.DOCKER_USERNAME }} DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }} + GHCR_USERNAME: ${{ secrets.GHCR_USERNAME }} + GHCR_PASSWORD: ${{ secrets.GHCR_PASSWORD }} DOCKER_CLI_EXPERIMENTAL: enabled DOCKER_BUILD_PLATFORM: linux/amd64,linux/arm64,linux/arm/v7 From 34657e33aabb364bd8cc78618ae0a5ba99afd8bb Mon Sep 17 00:00:00 2001 From: Travis Glenn Hansen Date: Wed, 1 Sep 2021 21:55:44 -0600 Subject: [PATCH 39/44] dep bumps Signed-off-by: Travis Glenn Hansen --- Dockerfile | 2 +- package-lock.json | 81 ++++++++++++++++++++++++----------------------- 2 files changed, 42 insertions(+), 41 deletions(-) diff --git a/Dockerfile b/Dockerfile index 2309dc7..017c408 100644 --- a/Dockerfile +++ b/Dockerfile @@ -12,7 +12,7 @@ RUN apt-get update && apt-get install -y locales && rm -rf /var/lib/apt/lists/* && localedef -i en_US -c -f UTF-8 -A /usr/share/locale/locale.alias en_US.UTF-8 ENV LANG=en_US.utf8 -ENV NODE_VERSION=v12.20.0 +ENV NODE_VERSION=v12.22.6 #ENV NODE_VERSION=v14.15.1 ENV NODE_ENV=production diff --git a/package-lock.json b/package-lock.json index f9280ae..2f4b754 100644 --- a/package-lock.json +++ b/package-lock.json @@ -5,6 +5,7 @@ "requires": true, "packages": { "": { + "name": "democratic-csi", "version": "1.2.0", "license": "MIT", "dependencies": { @@ -310,9 +311,9 @@ "integrity": "sha512-5tXH6Bx/kNGd3MgffdmP4dy2Z+G4eaXw0SE81Tq3BNadtnMR5/ySMzX4SLEzHJzSmPNn4HIdpQsBvXMUykr58w==" }, "node_modules/@types/node": { - "version": "16.4.13", - "resolved": "https://registry.npmjs.org/@types/node/-/node-16.4.13.tgz", - "integrity": "sha512-bLL69sKtd25w7p1nvg9pigE4gtKVpGTPojBFLMkGHXuUgap2sLqQt2qUnqmVCDfzGUL0DRNZP+1prIZJbMeAXg==" + "version": "16.7.10", + "resolved": "https://registry.npmjs.org/@types/node/-/node-16.7.10.tgz", + "integrity": 
"sha512-S63Dlv4zIPb8x6MMTgDq5WWRJQe56iBEY0O3SOFA9JrRienkOVDXSXBjjJw6HTNQYSE2JI6GMCR6LVbIMHJVvA==" }, "node_modules/acorn": { "version": "7.4.1", @@ -426,11 +427,11 @@ "integrity": "sha512-XdD5lRO/87udXCMC9meWdYiR+Nq6ZjUfXidViUZGu2F1MO4T3XwZ1et0hb2++BgLfhyJwy44BGB/yx80ABx8hg==" }, "node_modules/async-mutex": { - "version": "0.3.1", - "resolved": "https://registry.npmjs.org/async-mutex/-/async-mutex-0.3.1.tgz", - "integrity": "sha512-vRfQwcqBnJTLzVQo72Sf7KIUbcSUP5hNchx6udI1U6LuPQpfePgdjJzlCe76yFZ8pxlLjn9lwcl/Ya0TSOv0Tw==", + "version": "0.3.2", + "resolved": "https://registry.npmjs.org/async-mutex/-/async-mutex-0.3.2.tgz", + "integrity": "sha512-HuTK7E7MT7jZEh1P9GtRW9+aTWiDWWi9InbZ5hjxrnRa39KS4BW04+xLBhYNS2aXhHUIKZSw3gj4Pn1pj+qGAA==", "dependencies": { - "tslib": "^2.1.0" + "tslib": "^2.3.1" } }, "node_modules/asynckit": { @@ -1187,9 +1188,9 @@ } }, "node_modules/globals": { - "version": "13.10.0", - "resolved": "https://registry.npmjs.org/globals/-/globals-13.10.0.tgz", - "integrity": "sha512-piHC3blgLGFjvOuMmWZX60f+na1lXFDhQXBf1UYp2fXPXqvEUbOhNwi6BsQ0bQishwedgnjkwv1d9zKf+MWw3g==", + "version": "13.11.0", + "resolved": "https://registry.npmjs.org/globals/-/globals-13.11.0.tgz", + "integrity": "sha512-08/xrJ7wQjK9kkkRoI3OFUBbLx4f+6x3SGwcPvQ0QH6goFDrOU2oyAWrmh3dJezu65buo+HBMzAMQy6rovVC3g==", "dev": true, "dependencies": { "type-fest": "^0.20.2" @@ -2670,9 +2671,9 @@ "dev": true }, "node_modules/ssh2": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/ssh2/-/ssh2-1.2.0.tgz", - "integrity": "sha512-vklfVRyylayGV/zMwVEkTC9kBhA3t264hoUHV/yGuunBJh6uBGP1VlzhOp8EsqxpKnG0xkLE1qHZlU0+t8Vh6Q==", + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/ssh2/-/ssh2-1.3.0.tgz", + "integrity": "sha512-OjBQ5FR+ClYav3fRnvkhycmd5co5qEfofBaZEqVO3I4tKJLZqu+Ku4LN4nJSckjhqQnomqBqlCdvD3iGV+6isA==", "hasInstallScript": true, "dependencies": { "asn1": "^0.2.4", @@ -2683,7 +2684,7 @@ }, "optionalDependencies": { "cpu-features": "0.0.2", - "nan": "^2.14.2" + "nan": 
"^2.15.0" } }, "node_modules/sshpk": { @@ -2842,9 +2843,9 @@ "integrity": "sha512-XrHUvV5HpdLmIj4uVMxHggLbFSZYIn7HEWsqePZcI50pco+MPqJ50wMGY794X7AOOhxOBAjbkqfAbEe/QMp2Lw==" }, "node_modules/tslib": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.3.0.tgz", - "integrity": "sha512-N82ooyxVNm6h1riLCoyS9e3fuJ3AMG2zIZs2Gd1ATcSFjSA23Q0fzjjZeh0jbJvWVDZ0cJT8yaNNaaXHzueNjg==" + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.3.1.tgz", + "integrity": "sha512-77EbyPPpMz+FRFRuAFlWMtmgUWGe9UOG2Z25NqCwiIjRhOf5iKGuzSe5P2w1laq+FkRy4p+PCuVkJSGkzTEKVw==" }, "node_modules/tunnel-agent": { "version": "0.6.0", @@ -3071,9 +3072,9 @@ "integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==" }, "node_modules/yargs": { - "version": "17.1.0", - "resolved": "https://registry.npmjs.org/yargs/-/yargs-17.1.0.tgz", - "integrity": "sha512-SQr7qqmQ2sNijjJGHL4u7t8vyDZdZ3Ahkmo4sc1w5xI9TBX0QDdG/g4SFnxtWOsGLjwHQue57eFALfwFCnixgg==", + "version": "17.1.1", + "resolved": "https://registry.npmjs.org/yargs/-/yargs-17.1.1.tgz", + "integrity": "sha512-c2k48R0PwKIqKhPMWjeiF6y2xY/gPMUlro0sgxqXpbOIohWiLNXWslsootttv7E1e73QPAMQSg5FeySbVcpsPQ==", "dependencies": { "cliui": "^7.0.2", "escalade": "^3.1.1", @@ -3342,9 +3343,9 @@ "integrity": "sha512-5tXH6Bx/kNGd3MgffdmP4dy2Z+G4eaXw0SE81Tq3BNadtnMR5/ySMzX4SLEzHJzSmPNn4HIdpQsBvXMUykr58w==" }, "@types/node": { - "version": "16.4.13", - "resolved": "https://registry.npmjs.org/@types/node/-/node-16.4.13.tgz", - "integrity": "sha512-bLL69sKtd25w7p1nvg9pigE4gtKVpGTPojBFLMkGHXuUgap2sLqQt2qUnqmVCDfzGUL0DRNZP+1prIZJbMeAXg==" + "version": "16.7.10", + "resolved": "https://registry.npmjs.org/@types/node/-/node-16.7.10.tgz", + "integrity": "sha512-S63Dlv4zIPb8x6MMTgDq5WWRJQe56iBEY0O3SOFA9JrRienkOVDXSXBjjJw6HTNQYSE2JI6GMCR6LVbIMHJVvA==" }, "acorn": { "version": "7.4.1", @@ -3428,11 +3429,11 @@ "integrity": 
"sha512-XdD5lRO/87udXCMC9meWdYiR+Nq6ZjUfXidViUZGu2F1MO4T3XwZ1et0hb2++BgLfhyJwy44BGB/yx80ABx8hg==" }, "async-mutex": { - "version": "0.3.1", - "resolved": "https://registry.npmjs.org/async-mutex/-/async-mutex-0.3.1.tgz", - "integrity": "sha512-vRfQwcqBnJTLzVQo72Sf7KIUbcSUP5hNchx6udI1U6LuPQpfePgdjJzlCe76yFZ8pxlLjn9lwcl/Ya0TSOv0Tw==", + "version": "0.3.2", + "resolved": "https://registry.npmjs.org/async-mutex/-/async-mutex-0.3.2.tgz", + "integrity": "sha512-HuTK7E7MT7jZEh1P9GtRW9+aTWiDWWi9InbZ5hjxrnRa39KS4BW04+xLBhYNS2aXhHUIKZSw3gj4Pn1pj+qGAA==", "requires": { - "tslib": "^2.1.0" + "tslib": "^2.3.1" } }, "asynckit": { @@ -4032,9 +4033,9 @@ } }, "globals": { - "version": "13.10.0", - "resolved": "https://registry.npmjs.org/globals/-/globals-13.10.0.tgz", - "integrity": "sha512-piHC3blgLGFjvOuMmWZX60f+na1lXFDhQXBf1UYp2fXPXqvEUbOhNwi6BsQ0bQishwedgnjkwv1d9zKf+MWw3g==", + "version": "13.11.0", + "resolved": "https://registry.npmjs.org/globals/-/globals-13.11.0.tgz", + "integrity": "sha512-08/xrJ7wQjK9kkkRoI3OFUBbLx4f+6x3SGwcPvQ0QH6goFDrOU2oyAWrmh3dJezu65buo+HBMzAMQy6rovVC3g==", "dev": true, "requires": { "type-fest": "^0.20.2" @@ -5152,14 +5153,14 @@ "dev": true }, "ssh2": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/ssh2/-/ssh2-1.2.0.tgz", - "integrity": "sha512-vklfVRyylayGV/zMwVEkTC9kBhA3t264hoUHV/yGuunBJh6uBGP1VlzhOp8EsqxpKnG0xkLE1qHZlU0+t8Vh6Q==", + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/ssh2/-/ssh2-1.3.0.tgz", + "integrity": "sha512-OjBQ5FR+ClYav3fRnvkhycmd5co5qEfofBaZEqVO3I4tKJLZqu+Ku4LN4nJSckjhqQnomqBqlCdvD3iGV+6isA==", "requires": { "asn1": "^0.2.4", "bcrypt-pbkdf": "^1.0.2", "cpu-features": "0.0.2", - "nan": "^2.14.2" + "nan": "^2.15.0" } }, "sshpk": { @@ -5284,9 +5285,9 @@ "integrity": "sha512-XrHUvV5HpdLmIj4uVMxHggLbFSZYIn7HEWsqePZcI50pco+MPqJ50wMGY794X7AOOhxOBAjbkqfAbEe/QMp2Lw==" }, "tslib": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.3.0.tgz", - "integrity": 
"sha512-N82ooyxVNm6h1riLCoyS9e3fuJ3AMG2zIZs2Gd1ATcSFjSA23Q0fzjjZeh0jbJvWVDZ0cJT8yaNNaaXHzueNjg==" + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.3.1.tgz", + "integrity": "sha512-77EbyPPpMz+FRFRuAFlWMtmgUWGe9UOG2Z25NqCwiIjRhOf5iKGuzSe5P2w1laq+FkRy4p+PCuVkJSGkzTEKVw==" }, "tunnel-agent": { "version": "0.6.0", @@ -5461,9 +5462,9 @@ "integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==" }, "yargs": { - "version": "17.1.0", - "resolved": "https://registry.npmjs.org/yargs/-/yargs-17.1.0.tgz", - "integrity": "sha512-SQr7qqmQ2sNijjJGHL4u7t8vyDZdZ3Ahkmo4sc1w5xI9TBX0QDdG/g4SFnxtWOsGLjwHQue57eFALfwFCnixgg==", + "version": "17.1.1", + "resolved": "https://registry.npmjs.org/yargs/-/yargs-17.1.1.tgz", + "integrity": "sha512-c2k48R0PwKIqKhPMWjeiF6y2xY/gPMUlro0sgxqXpbOIohWiLNXWslsootttv7E1e73QPAMQSg5FeySbVcpsPQ==", "requires": { "cliui": "^7.0.2", "escalade": "^3.1.1", From e1ce46d09c2cf9904ff7fbace53d6ef15e9c54a7 Mon Sep 17 00:00:00 2001 From: Travis Glenn Hansen Date: Wed, 1 Sep 2021 22:01:27 -0600 Subject: [PATCH 40/44] add cmake to build deps Signed-off-by: Travis Glenn Hansen --- Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index 017c408..7532bf5 100644 --- a/Dockerfile +++ b/Dockerfile @@ -17,7 +17,7 @@ ENV NODE_VERSION=v12.22.6 ENV NODE_ENV=production # install build deps -RUN apt-get update && apt-get install -y python make gcc g++ +RUN apt-get update && apt-get install -y python make cmake gcc g++ # install node RUN apt-get update && apt-get install -y wget xz-utils From 339bfa6ba6fe947b3dd73fe26276bf08312ee05b Mon Sep 17 00:00:00 2001 From: Travis Glenn Hansen Date: Wed, 1 Sep 2021 22:15:51 -0600 Subject: [PATCH 41/44] add label to docker image to autolink to repo Signed-off-by: Travis Glenn Hansen --- Dockerfile | 2 ++ 1 file changed, 2 insertions(+) diff --git a/Dockerfile b/Dockerfile index 7532bf5..09bd268 100644 --- 
a/Dockerfile +++ b/Dockerfile @@ -43,6 +43,8 @@ RUN rm -rf docker ###################### FROM debian:10-slim +LABEL org.opencontainers.image.source https://github.com/democratic-csi/democratic-csi + ENV DEBIAN_FRONTEND=noninteractive ARG TARGETPLATFORM From 22f4486e8a5163dae7eb57187adfacdc826caec7 Mon Sep 17 00:00:00 2001 From: Travis Glenn Hansen Date: Thu, 2 Sep 2021 22:23:24 -0600 Subject: [PATCH 42/44] fix controller expand volume logic for SCALE Signed-off-by: Travis Glenn Hansen --- src/driver/freenas/ssh.js | 39 ++++++++++++++++++++++++++++++--------- 1 file changed, 30 insertions(+), 9 deletions(-) diff --git a/src/driver/freenas/ssh.js b/src/driver/freenas/ssh.js index 605b873..c88c101 100644 --- a/src/driver/freenas/ssh.js +++ b/src/driver/freenas/ssh.js @@ -201,9 +201,8 @@ class FreeNASSshDriver extends ControllerZfsSshBaseDriver { share = { nfs_paths: [properties.mountpoint.value], nfs_comment: `democratic-csi (${this.ctx.args.csiName}): ${datasetName}`, - nfs_network: this.options.nfs.shareAllowedNetworks.join( - "," - ), + nfs_network: + this.options.nfs.shareAllowedNetworks.join(","), nfs_hosts: this.options.nfs.shareAllowedHosts.join(","), nfs_alldirs: this.options.nfs.shareAlldirs, nfs_ro: false, @@ -633,11 +632,10 @@ class FreeNASSshDriver extends ControllerZfsSshBaseDriver { ? this.options.iscsi.extentBlocksize : 512; - const extentDisablePhysicalBlocksize = this.options.iscsi.hasOwnProperty( - "extentDisablePhysicalBlocksize" - ) - ? this.options.iscsi.extentDisablePhysicalBlocksize - : true; + const extentDisablePhysicalBlocksize = + this.options.iscsi.hasOwnProperty("extentDisablePhysicalBlocksize") + ? this.options.iscsi.extentDisablePhysicalBlocksize + : true; const extentRpm = this.options.iscsi.hasOwnProperty("extentRpm") ? 
this.options.iscsi.extentRpm @@ -1605,6 +1603,7 @@ class FreeNASSshDriver extends ControllerZfsSshBaseDriver { async expandVolume(call, datasetName) { const driverShareType = this.getDriverShareType(); const sshClient = this.getSshClient(); + const zb = await this.getZetabyte(); switch (driverShareType) { case "iscsi": @@ -1612,7 +1611,29 @@ class FreeNASSshDriver extends ControllerZfsSshBaseDriver { let command; let reload = false; if (isScale) { - command = sshClient.buildCommand("systemctl", ["reload", "scst"]); + let properties; + properties = await zb.zfs.get(datasetName, [ + FREENAS_ISCSI_ASSETS_NAME_PROPERTY_NAME, + ]); + properties = properties[datasetName]; + this.ctx.logger.debug("zfs props data: %j", properties); + let iscsiName = + properties[FREENAS_ISCSI_ASSETS_NAME_PROPERTY_NAME].value; + + /** + * command = sshClient.buildCommand("systemctl", ["reload", "scst"]); + * does not help ^ + * + * echo 1 > /sys/kernel/scst_tgt/devices/${iscsiName}/resync_size + * works ^ + * + * scstadmin -resync_dev ${iscsiName} + * works but always give a exit code of 1 ^ + */ + command = sshClient.buildCommand("sh", [ + "-c", + `echo 1 > /sys/kernel/scst_tgt/devices/${iscsiName}/resync_size`, + ]); reload = true; } else { command = sshClient.buildCommand("/etc/rc.d/ctld", ["reload"]); From 8a45b7a7ddd2def0dec075942d2fe9d35ca301c4 Mon Sep 17 00:00:00 2001 From: Travis Glenn Hansen Date: Thu, 2 Sep 2021 22:24:31 -0600 Subject: [PATCH 43/44] prep for release, documentation Signed-off-by: Travis Glenn Hansen --- README.md | 24 +++++++-- examples/freenas-api-iscsi.yaml | 85 +++++++++++++++++++++++++++++++ examples/freenas-api-nfs.yaml | 58 +++++++++++++++++++++ examples/freenas-api-smb.yaml | 77 ++++++++++++++++++++++++++++ examples/freenas-smb.yaml | 2 +- examples/node-common.yaml | 23 +++++++++ examples/synology-iscsi.yaml | 89 +++++++++++++++++++++++++++++++++ 7 files changed, 354 insertions(+), 4 deletions(-) create mode 100644 examples/freenas-api-iscsi.yaml create mode 
100644 examples/freenas-api-nfs.yaml create mode 100644 examples/freenas-api-smb.yaml create mode 100644 examples/node-common.yaml create mode 100644 examples/synology-iscsi.yaml diff --git a/README.md b/README.md index c64c038..dd8f3bb 100644 --- a/README.md +++ b/README.md @@ -18,15 +18,21 @@ have access to resizing, snapshots, clones, etc functionality. - `freenas-nfs` (manages zfs datasets to share over nfs) - `freenas-iscsi` (manages zfs zvols to share over iscsi) - `freenas-smb` (manages zfs datasets to share over smb) + - `freenas-api-nfs` experimental use with SCALE only (manages zfs datasets to share over nfs) + - `freenas-api-iscsi` experimental use with SCALE only (manages zfs zvols to share over iscsi) + - `freenas-api-smb` experimental use with SCALE only (manages zfs datasets to share over smb) - `zfs-generic-nfs` (works with any ZoL installation...ie: Ubuntu) - `zfs-generic-iscsi` (works with any ZoL installation...ie: Ubuntu) - `zfs-local-ephemeral-inline` (provisions node-local zfs datasets) + - `synology-iscsi` experimental (manages volumes to share over iscsi) + - `lustre-client` (crudely provisions storage using a shared lustre + share/directory for all volumes) - `nfs-client` (crudely provisions storage using a shared nfs share/directory for all volumes) - `smb-client` (crudely provisions storage using a shared smb share/directory for all volumes) - - `node-manual` (allows connecting to manually created smb, nfs, and iscsi - volumes, see sample PVs in the `examples` directory) + - `node-manual` (allows connecting to manually created smb, nfs, lustre, and + iscsi volumes, see sample PVs in the `examples` directory) - framework for developing `csi` drivers If you have any interest in providing a `csi` driver, simply open an issue to @@ -139,11 +145,16 @@ necessary. Server preparation depends slightly on which `driver` you are using. 
-### FreeNAS (freenas-nfs, freenas-iscsi, freenas-smb) +### FreeNAS (freenas-nfs, freenas-iscsi, freenas-smb, freenas-api-nfs, freenas-api-iscsi, freenas-api-smb) The recommended version of FreeNAS is 12.0-U2+, however the driver should work with much older versions as well. +The various `freenas-api-*` drivers are currently EXPERIMENTAL and can only be +used with SCALE 21.08+. Fundamentally these drivers remove the need for `ssh` +connections and do all operations entirely with the TrueNAS api. With that in +mind, any ssh/shell/etc requirements below can be safely ignored. + Ensure the following services are configurged and running: - ssh (if you use a password for authentication make sure it is allowed) @@ -208,6 +219,10 @@ Ensure ssh and zfs is installed on the nfs/iscsi server and that you have instal - `sudo yum install targetcli -y` - `sudo apt-get -y install targetcli-fb` +### Synology (synology-iscsi) + +Ensure iscsi manager has been installed and is generally setup/configured. + ## Helm Installation ``` @@ -252,6 +267,9 @@ microk8s helm upgrade \ zfs-nfs democratic-csi/democratic-csi ``` +- microk8s - `/var/snap/microk8s/common/var/lib/kubelet` +- pivotal - `/var/vcap/data/kubelet` + ### openshift `democratic-csi` generally works fine with openshift. 
Some special parameters diff --git a/examples/freenas-api-iscsi.yaml b/examples/freenas-api-iscsi.yaml new file mode 100644 index 0000000..b49407e --- /dev/null +++ b/examples/freenas-api-iscsi.yaml @@ -0,0 +1,85 @@ +driver: freenas-api-iscsi +instance_id: +httpConnection: + protocol: http + host: server address + port: 80 + # use only 1 of apiKey or username/password + # if both are present, apiKey is preferred + # apiKey is only available starting in TrueNAS-12 + #apiKey: + username: root + password: + allowInsecure: true + # use apiVersion 2 for TrueNAS-12 and up (will work on 11.x in some scenarios as well) + # leave unset for auto-detection + #apiVersion: 2 +zfs: + # can be used to override defaults if necessary + # the example below is useful for TrueNAS 12 + #cli: + # sudoEnabled: true + # + # leave paths unset for auto-detection + # paths: + # zfs: /usr/local/sbin/zfs + # zpool: /usr/local/sbin/zpool + # sudo: /usr/local/bin/sudo + # chroot: /usr/sbin/chroot + + # can be used to set arbitrary values on the dataset/zvol + # can use handlebars templates with the parameters from the storage class/CO + #datasetProperties: + # "org.freenas:description": "{{ parameters.[csi.storage.k8s.io/pvc/namespace] }}/{{ parameters.[csi.storage.k8s.io/pvc/name] }}" + # "org.freenas:test": "{{ parameters.foo }}" + # "org.freenas:test2": "some value" + + # total volume name (zvol//) length cannot exceed 63 chars + # https://www.ixsystems.com/documentation/freenas/11.2-U5/storage.html#zfs-zvol-config-opts-tab + # standard volume naming overhead is 46 chars + # datasetParentName should therefore be 17 chars or less + datasetParentName: tank/k8s/b/vols + # do NOT make datasetParentName and detachedSnapshotsDatasetParentName overlap + # they may be siblings, but neither should be nested in the other + detachedSnapshotsDatasetParentName: tanks/k8s/b/snaps + # "" (inherit), lz4, gzip-9, etc + zvolCompression: + # "" (inherit), on, off, verify + zvolDedup: + zvolEnableReservation: 
false + # 512, 1K, 2K, 4K, 8K, 16K, 64K, 128K default is 16K + zvolBlocksize: +iscsi: + targetPortal: "server[:port]" + # for multipath + targetPortals: [] # [ "server[:port]", "server[:port]", ... ] + # leave empty to omit usage of -I with iscsiadm + interface: + + # MUST ensure uniqueness + # full iqn limit is 223 bytes, plan accordingly + # default is "{{ name }}" + #nameTemplate: "{{ parameters.[csi.storage.k8s.io/pvc/namespace] }}-{{ parameters.[csi.storage.k8s.io/pvc/name] }}" + namePrefix: csi- + nameSuffix: "-clustera" + # add as many as needed + targetGroups: + # get the correct ID from the "portal" section in the UI + - targetGroupPortalGroup: 1 + # get the correct ID from the "initiators" section in the UI + targetGroupInitiatorGroup: 1 + # None, CHAP, or CHAP Mutual + targetGroupAuthType: None + # get the correct ID from the "Authorized Access" section of the UI + # only required if using Chap + targetGroupAuthGroup: + + extentInsecureTpc: true + extentXenCompat: false + extentDisablePhysicalBlocksize: true + # 512, 1024, 2048, or 4096, + extentBlocksize: 512 + # "" (let FreeNAS decide, currently defaults to SSD), Unknown, SSD, 5400, 7200, 10000, 15000 + extentRpm: "SSD" + # 0-100 (0 == ignore) + extentAvailThreshold: 0 diff --git a/examples/freenas-api-nfs.yaml b/examples/freenas-api-nfs.yaml new file mode 100644 index 0000000..0c59b8b --- /dev/null +++ b/examples/freenas-api-nfs.yaml @@ -0,0 +1,58 @@ +driver: freenas-api-nfs +instance_id: +httpConnection: + protocol: http + host: server address + port: 80 + # use only 1 of apiKey or username/password + # if both are present, apiKey is preferred + # apiKey is only available starting in TrueNAS-12 + #apiKey: + username: root + password: + allowInsecure: true + # use apiVersion 2 for TrueNAS-12 and up (will work on 11.x in some scenarios as well) + # leave unset for auto-detection + #apiVersion: 2 +zfs: + # can be used to override defaults if necessary + # the example below is useful for TrueNAS 12 + 
#cli: + # sudoEnabled: true + # + # leave paths unset for auto-detection + # paths: + # zfs: /usr/local/sbin/zfs + # zpool: /usr/local/sbin/zpool + # sudo: /usr/local/bin/sudo + # chroot: /usr/sbin/chroot + + # can be used to set arbitrary values on the dataset/zvol + # can use handlebars templates with the parameters from the storage class/CO + #datasetProperties: + # "org.freenas:description": "{{ parameters.[csi.storage.k8s.io/pvc/namespace] }}/{{ parameters.[csi.storage.k8s.io/pvc/name] }}" + # "org.freenas:test": "{{ parameters.foo }}" + # "org.freenas:test2": "some value" + + datasetParentName: tank/k8s/a/vols + # do NOT make datasetParentName and detachedSnapshotsDatasetParentName overlap + # they may be siblings, but neither should be nested in the other + detachedSnapshotsDatasetParentName: tank/k8s/a/snaps + datasetEnableQuotas: true + datasetEnableReservation: false + datasetPermissionsMode: "0777" + datasetPermissionsUser: 0 + datasetPermissionsGroup: 0 + #datasetPermissionsAcls: + #- "-m everyone@:full_set:allow" + #- "-m u:kube:full_set:allow" + +nfs: + shareHost: server address + shareAlldirs: false + shareAllowedHosts: [] + shareAllowedNetworks: [] + shareMaprootUser: root + shareMaprootGroup: root + shareMapallUser: "" + shareMapallGroup: "" diff --git a/examples/freenas-api-smb.yaml b/examples/freenas-api-smb.yaml new file mode 100644 index 0000000..7b45f4c --- /dev/null +++ b/examples/freenas-api-smb.yaml @@ -0,0 +1,77 @@ +driver: freenas-api-smb +instance_id: +httpConnection: + protocol: http + host: server address + port: 80 + # use only 1 of apiKey or username/password + # if both are present, apiKey is preferred + # apiKey is only available starting in TrueNAS-12 + #apiKey: + username: root + password: + allowInsecure: true + # use apiVersion 2 for TrueNAS-12 and up (will work on 11.x in some scenarios as well) + # leave unset for auto-detection + #apiVersion: 2 +zfs: + # can be used to override defaults if necessary + # the example below is 
useful for TrueNAS 12 + #cli: + # sudoEnabled: true + # + # leave paths unset for auto-detection + # paths: + # zfs: /usr/local/sbin/zfs + # zpool: /usr/local/sbin/zpool + # sudo: /usr/local/bin/sudo + # chroot: /usr/sbin/chroot + + # can be used to set arbitrary values on the dataset/zvol + # can use handlebars templates with the parameters from the storage class/CO + #datasetProperties: + # "org.freenas:description": "{{ parameters.[csi.storage.k8s.io/pvc/namespace] }}/{{ parameters.[csi.storage.k8s.io/pvc/name] }}" + # "org.freenas:test": "{{ parameters.foo }}" + # "org.freenas:test2": "some value" + + datasetProperties: + aclmode: restricted + casesensitivity: mixed + + datasetParentName: tank/k8s/a/vols + # do NOT make datasetParentName and detachedSnapshotsDatasetParentName overlap + # they may be siblings, but neither should be nested in the other + detachedSnapshotsDatasetParentName: tank/k8s/a/snaps + datasetEnableQuotas: true + datasetEnableReservation: false + datasetPermissionsMode: "0777" + datasetPermissionsUser: 0 + datasetPermissionsGroup: 0 + datasetPermissionsAcls: + - "-m everyone@:full_set:allow" + #- "-m u:kube:full_set:allow" + +smb: + shareHost: server address + nameTemplate: "" + namePrefix: "" + nameSuffix: "" + + # if any of the shareFoo parameters do not work with your version of FreeNAS + # simply comment the param (and use the configuration template if necessary) + + shareAuxiliaryConfigurationTemplate: | + #guest ok = yes + #guest only = yes + shareHome: false + shareAllowedHosts: [] + shareDeniedHosts: [] + #shareDefaultPermissions: true + shareGuestOk: true + #shareGuestOnly: true + #shareShowHiddenFiles: true + shareRecycleBin: true + shareBrowsable: false + shareAccessBasedEnumeration: true + shareTimeMachine: false + #shareStorageTask: diff --git a/examples/freenas-smb.yaml b/examples/freenas-smb.yaml index 9132f26..6d08b7e 100644 --- a/examples/freenas-smb.yaml +++ b/examples/freenas-smb.yaml @@ -1,4 +1,4 @@ -driver: freenas-nfs 
+driver: freenas-smb instance_id: httpConnection: protocol: http diff --git a/examples/node-common.yaml b/examples/node-common.yaml new file mode 100644 index 0000000..a09114c --- /dev/null +++ b/examples/node-common.yaml @@ -0,0 +1,23 @@ +# common options for the node service + +node: + mount: + # should fsck be executed before mounting the fs + checkFilesystem: + xfs: + enabled: false + customOptions: [] + ext4: + enabled: false + customOptions: [] + customFilesystemOptions: [] + format: + xfs: + customOptions: [] + #- -K + # ... + ext4: + customOptions: [] + #- -E + #- nodiscard + # ... diff --git a/examples/synology-iscsi.yaml b/examples/synology-iscsi.yaml new file mode 100644 index 0000000..b8cd825 --- /dev/null +++ b/examples/synology-iscsi.yaml @@ -0,0 +1,89 @@ +driver: synology-iscsi +httpConnection: + protocol: http + host: server address + port: 5000 + username: admin + password: password + allowInsecure: true + # should be unique across all installs to the same nas + session: "democratic-csi" + serialize: true + +synology: + # choose the proper volume for your system + volume: /volume1 + +iscsi: + targetPortal: "server[:port]" + # for multipath + targetPortals: [] # [ "server[:port]", "server[:port]", ... ] + # leave empty to omit usage of -I with iscsiadm + interface: "" + # can be whatever you would like + baseiqn: "iqn.2000-01.com.synology:csi."
+ + # MUST ensure uniqueness + # full iqn limit is 223 bytes, plan accordingly + namePrefix: "" + nameSuffix: "" + + # documented below are several blocks + # pick the option appropriate for you based on what your backing fs is and desired features + # you do not need to alter dev_attribs under normal circumstances but they may be altered in advanced use-cases + lunTemplate: + # btrfs thin provisioning + type: "BLUN" + # tpws = Hardware-assisted zeroing + # caw = Hardware-assisted locking + # 3pc = Hardware-assisted data transfer + # tpu = Space reclamation + # can_snapshot = Snapshot + #dev_attribs: + #- dev_attrib: emulate_tpws + # enable: 1 + #- dev_attrib: emulate_caw + # enable: 1 + #- dev_attrib: emulate_3pc + # enable: 1 + #- dev_attrib: emulate_tpu + # enable: 0 + #- dev_attrib: can_snapshot + # enable: 1 + + # btrfs thick provisioning + # only zeroing and locking supported + #type: "BLUN_THICK" + # tpws = Hardware-assisted zeroing + # caw = Hardware-assisted locking + #dev_attribs: + #- dev_attrib: emulate_tpws + # enable: 1 + #- dev_attrib: emulate_caw + # enable: 1 + + # ext4 thin provisioning UI sends everything with enabled=0 + #type: "THIN" + + # ext4 thin with advanced legacy features set + # can only alter tpu (all others are set as enabled=1) + #type: "ADV" + #dev_attribs: + #- dev_attrib: emulate_tpu + # enable: 1 + + # ext4 thick + # can only alter caw + #type: "FILE" + #dev_attribs: + #- dev_attrib: emulate_caw + # enable: 1 + + lunSnapshotTemplate: + is_locked: true + # https://kb.synology.com/en-me/DSM/tutorial/What_is_file_system_consistent_snapshot + is_app_consistent: true + + targetTemplate: + auth_type: 0 + max_sessions: 0 From c1a888826660721171c3e54fbeab357b137dd392 Mon Sep 17 00:00:00 2001 From: Travis Glenn Hansen Date: Thu, 2 Sep 2021 22:51:52 -0600 Subject: [PATCH 44/44] release prep for 1.3.0 Signed-off-by: Travis Glenn Hansen --- CHANGELOG.md | 21 +++++++++++++++++++++ package.json | 2 +- 2 files changed, 22 insertions(+), 1
deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index ad713ff..4ec7fbf 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,24 @@ +# v1.3.0 + +Released 2021-09-02 + +- use `ghcr.io` for images as well as docker hub (#90) +- introduce api-only drivers for freenas (`freenas-api-*`) +- `smb-client` driver which creates folders on an smb share +- `lustre-client` driver which creates folders on a lustre share + attaching to various volumes which have been pre-provisioned by the operator +- `synology-iscsi` driver +- various documentation improvements +- support for csi versions `1.4.0` and `1.5.0` +- reintroduce advanced options that allow control over `fsck` (#85) +- advanced options for customizing `mkfs` commands +- better handling of stale nfs connections +- do not log potentially sensitive data in mount commands +- timeouts on various commands to improve driver operations under adverse + conditions +- various fixes and improvements throughout +- dependency bumps + # v1.2.0 Released 2021-05-12 diff --git a/package.json b/package.json index 2b2398b..2db972f 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "democratic-csi", - "version": "1.2.0", + "version": "1.3.0", "description": "kubernetes csi driver framework", "main": "bin/democratic-csi", "scripts": {