From 61a4adc6d1c2caf08369ddb00f0f8fec9a1be6be Mon Sep 17 00:00:00 2001 From: Travis Glenn Hansen Date: Sat, 21 Nov 2020 17:15:22 -0700 Subject: [PATCH 01/20] support sudo setups, support apiKey with TrueNAS --- examples/freenas-iscsi.yaml | 7 +++ examples/freenas-nfs.yaml | 7 +++ examples/freenas-smb.yaml | 7 +++ examples/zfs-generic-iscsi.yaml | 1 + examples/zfs-generic-nfs.yaml | 1 + src/driver/controller-zfs-generic/index.js | 19 ++++--- src/driver/controller-zfs-ssh/index.js | 27 ++++++++++ src/driver/freenas/http/index.js | 62 ++++++++++++++-------- 8 files changed, 104 insertions(+), 27 deletions(-) diff --git a/examples/freenas-iscsi.yaml b/examples/freenas-iscsi.yaml index 33960bb..99a83fd 100644 --- a/examples/freenas-iscsi.yaml +++ b/examples/freenas-iscsi.yaml @@ -4,9 +4,15 @@ httpConnection: protocol: http host: server address port: 80 + # use only 1 of apiKey or username/password + # if both are present, apiKey is preferred + # apiKey is only available starting in TrueNAS-12 + #apiKey: username: root password: allowInsecure: true + # use apiVersion 2 for TrueNAS-12 and up (will work on 11.x in some scenarios as well) + #apiVersion: 2 sshConnection: host: server address port: 22 @@ -21,6 +27,7 @@ zfs: # can be used to override defaults if necessary # the example below is useful for TrueNAS 12 #cli: + # sudoEnabled: true # paths: # zfs: /usr/local/sbin/zfs # zpool: /usr/local/sbin/zpool diff --git a/examples/freenas-nfs.yaml b/examples/freenas-nfs.yaml index 6d915ab..141d305 100644 --- a/examples/freenas-nfs.yaml +++ b/examples/freenas-nfs.yaml @@ -4,9 +4,15 @@ httpConnection: protocol: http host: server address port: 80 + # use only 1 of apiKey or username/password + # if both are present, apiKey is preferred + # apiKey is only available starting in TrueNAS-12 + #apiKey: username: root password: allowInsecure: true + # use apiVersion 2 for TrueNAS-12 and up (will work on 11.x in some scenarios as well) + #apiVersion: 2 sshConnection: host: server address port: 22 @@ -21,6 +27,7 @@ zfs: # can be used to override defaults if necessary # the example below is useful for TrueNAS 12 #cli: + # sudoEnabled: true # paths: # zfs: /usr/local/sbin/zfs # zpool: /usr/local/sbin/zpool diff --git a/examples/freenas-smb.yaml b/examples/freenas-smb.yaml index 04a4be7..6704b11 100644 --- a/examples/freenas-smb.yaml +++ b/examples/freenas-smb.yaml @@ -4,9 +4,15 @@ httpConnection: protocol: http host: server address port: 80 + # use only 1 of apiKey or username/password + # if both are present, apiKey is preferred + # apiKey is only available starting in TrueNAS-12 + #apiKey: username: root password: allowInsecure: true + # use apiVersion 2 for TrueNAS-12 and up (will work on 11.x in some scenarios as well) + #apiVersion: 2 sshConnection: host: server address port: 22 @@ -21,6 +27,7 @@ zfs: # can be used to override defaults if necessary # the example below is useful for TrueNAS 12 #cli: + # sudoEnabled: true # paths: # zfs: /usr/local/sbin/zfs # zpool: /usr/local/sbin/zpool diff --git a/examples/zfs-generic-iscsi.yaml b/examples/zfs-generic-iscsi.yaml index eb65411..983d1d2 100644 --- a/examples/zfs-generic-iscsi.yaml +++ b/examples/zfs-generic-iscsi.yaml @@ -51,6 +51,7 @@ iscsi: # https://bugzilla.redhat.com/show_bug.cgi?id=1659195 # http://atodorov.org/blog/2015/04/07/how-to-configure-iscsi-target-on-red-hat-enterprise-linux-7/ shareStragetyTargetCli: + #sudoEnabled: true basename: "iqn.2003-01.org.linux-iscsi.ubuntu-19.x8664" tpg: attributes: diff --git a/examples/zfs-generic-nfs.yaml 
b/examples/zfs-generic-nfs.yaml index ed4f36b..ec660bc 100644 --- a/examples/zfs-generic-nfs.yaml +++ b/examples/zfs-generic-nfs.yaml @@ -17,6 +17,7 @@ zfs: # can be used to override defaults if necessary # the example below is useful for TrueNAS 12 #cli: + # sudoEnabled: true # paths: # zfs: /usr/local/sbin/zfs # zpool: /usr/local/sbin/zpool diff --git a/src/driver/controller-zfs-generic/index.js b/src/driver/controller-zfs-generic/index.js index eb1dc6d..d91bb23 100644 --- a/src/driver/controller-zfs-generic/index.js +++ b/src/driver/controller-zfs-generic/index.js @@ -260,14 +260,21 @@ delete ${iscsiName} const sshClient = this.getSshClient(); data = data.trim(); + let command = "sh"; let args = ["-c"]; - let command = []; - command.push(`echo "${data}"`.trim()); - command.push("|"); - command.push("targetcli"); + let taregetCliCommand = []; + taregetCliCommand.push(`echo "${data}"`.trim()); + taregetCliCommand.push("|"); + taregetCliCommand.push("targetcli"); - args.push("'" + command.join(" ") + "'"); - return sshClient.exec(sshClient.buildCommand("sh", args)); + if (this.options.iscsi.shareStragetyTargetCli.sudoEnabled) { + command = "sudo"; + args.unshift("sh"); + } + + args.push("'" + taregetCliCommand.join(" ") + "'"); + + return sshClient.exec(sshClient.buildCommand(command, args)); } } diff --git a/src/driver/controller-zfs-ssh/index.js b/src/driver/controller-zfs-ssh/index.js index 67c1eb1..d0fe409 100644 --- a/src/driver/controller-zfs-ssh/index.js +++ b/src/driver/controller-zfs-ssh/index.js @@ -135,9 +135,24 @@ class ControllerZfsSshBaseDriver extends CsiBaseDriver { options.paths = this.options.zfs.cli.paths; } + if ( + this.options.zfs.hasOwnProperty("cli") && + this.options.zfs.cli.hasOwnProperty("sudoEnabled") + ) { + options.sudo = this.getSudoEnabled(); + } + return new Zetabyte(options); } + getSudoEnabled() { + return this.options.zfs.cli.sudoEnabled === true; + } + + getSudoPath() { + return this.options.zfs.cli.paths.sudo || "/usr/bin/sudo"; + } + getDatasetParentName() { let datasetParentName = this.options.zfs.datasetParentName; datasetParentName = datasetParentName.replace(/\/$/, ""); @@ -671,6 +686,10 @@ class ControllerZfsSshBaseDriver extends CsiBaseDriver { this.options.zfs.datasetPermissionsMode, properties.mountpoint.value, ]); + if (this.getSudoEnabled()) { + command = this.getSudoPath() + " " + command; + } + driver.ctx.logger.verbose("set permission command: %s", command); response = await sshClient.exec(command); } @@ -690,6 +709,10 @@ class ControllerZfsSshBaseDriver extends CsiBaseDriver { : ""), properties.mountpoint.value, ]); + if (this.getSudoEnabled()) { + command = this.getSudoPath() + " " + command; + } + driver.ctx.logger.verbose("set ownership command: %s", command); response = await sshClient.exec(command); } @@ -703,6 +726,10 @@ class ControllerZfsSshBaseDriver extends CsiBaseDriver { acl, properties.mountpoint.value, ]); + if (this.getSudoEnabled()) { + command = this.getSudoPath() + " " + command; + } + driver.ctx.logger.verbose("set acl command: %s", command); response = await sshClient.exec(command); } diff --git a/src/driver/freenas/http/index.js b/src/driver/freenas/http/index.js index 62c0685..6823c4e 100644 --- a/src/driver/freenas/http/index.js +++ b/src/driver/freenas/http/index.js @@ -19,7 +19,7 @@ class Client { host: server.host, port: server.port, //userinfo: server.username + ":" + server.password, - path: server.apiVersion == 1 ? "/api/v1.0" : "/api/v2.0" + path: server.apiVersion == 1 ? 
"/api/v1.0" : "/api/v2.0", }; return URI.serialize(options); } @@ -55,22 +55,27 @@ class Client { headers: { Accept: "application/json", "User-Agent": USER_AGENT, - "Content-Type": "application/json" + "Content-Type": "application/json", }, json: true, qs: data, agentOptions: { - rejectUnauthorized: !!!client.options.allowInsecure - } + rejectUnauthorized: !!!client.options.allowInsecure, + }, }; - request(options, function(err, res, body) { + request(options, function (err, res, body) { client.log_repsonse(...arguments, options); if (err) { reject(err); } resolve(res); - }).auth(client.options.username, client.options.password); + }).auth( + client.options.username, + client.options.password, + true, + client.options.apiKey + ); }); } @@ -87,22 +92,27 @@ class Client { headers: { Accept: "application/json", "User-Agent": USER_AGENT, - "Content-Type": "application/json" + "Content-Type": "application/json", }, json: true, body: data, agentOptions: { - rejectUnauthorized: !!!client.options.allowInsecure - } + rejectUnauthorized: !!!client.options.allowInsecure, + }, }; - request(options, function(err, res, body) { + request(options, function (err, res, body) { client.log_repsonse(...arguments, options); if (err) { reject(err); } resolve(res); - }).auth(client.options.username, client.options.password); + }).auth( + client.options.username, + client.options.password, + true, + client.options.apiKey + ); }); } @@ -119,22 +129,27 @@ class Client { headers: { Accept: "application/json", "User-Agent": USER_AGENT, - "Content-Type": "application/json" + "Content-Type": "application/json", }, json: true, body: data, agentOptions: { - rejectUnauthorized: !!!client.options.allowInsecure - } + rejectUnauthorized: !!!client.options.allowInsecure, + }, }; - request(options, function(err, res, body) { + request(options, function (err, res, body) { client.log_repsonse(...arguments, options); if (err) { reject(err); } resolve(res); - }).auth(client.options.username, client.options.password); + }).auth( + client.options.username, + client.options.password, + true, + client.options.apiKey + ); }); } @@ -151,22 +166,27 @@ class Client { headers: { Accept: "application/json", "User-Agent": USER_AGENT, - "Content-Type": "application/json" + "Content-Type": "application/json", }, json: true, body: data, agentOptions: { - rejectUnauthorized: !!!client.options.allowInsecure - } + rejectUnauthorized: !!!client.options.allowInsecure, + }, }; - request(options, function(err, res, body) { + request(options, function (err, res, body) { client.log_repsonse(...arguments, options); if (err) { reject(err); } resolve(res); - }).auth(client.options.username, client.options.password); + }).auth( + client.options.username, + client.options.password, + true, + client.options.apiKey + ); }); } } From 45b502da627442a3030f78a4836f5ce3e3fee71b Mon Sep 17 00:00:00 2001 From: Travis Glenn Hansen Date: Thu, 26 Nov 2020 11:56:44 -0700 Subject: [PATCH 02/20] prevent race conditions on iscsi asset deletion due to ID re-use --- examples/zfs-generic-iscsi.yaml | 1 + src/driver/freenas/index.js | 102 ++++++++++++++++++++++++++------ 2 files changed, 86 insertions(+), 17 deletions(-) diff --git a/examples/zfs-generic-iscsi.yaml b/examples/zfs-generic-iscsi.yaml index 983d1d2..5a74b55 100644 --- a/examples/zfs-generic-iscsi.yaml +++ b/examples/zfs-generic-iscsi.yaml @@ -17,6 +17,7 @@ zfs: # can be used to override defaults if necessary # the example below is useful for TrueNAS 12 #cli: + # sudoEnabled: true # paths: # zfs: /usr/local/sbin/zfs 
# zpool: /usr/local/sbin/zpool diff --git a/src/driver/freenas/index.js b/src/driver/freenas/index.js index 0f56d57..d9a030c 100644 --- a/src/driver/freenas/index.js +++ b/src/driver/freenas/index.js @@ -13,6 +13,8 @@ const FREENAS_ISCSI_EXTENT_ID_PROPERTY_NAME = "democratic-csi:freenas_iscsi_extent_id"; const FREENAS_ISCSI_TARGETTOEXTENT_ID_PROPERTY_NAME = "democratic-csi:freenas_iscsi_targettoextent_id"; +const FREENAS_ISCSI_ASSETS_NAME_PROPERTY_NAME = + "democratic-csi:freenas_iscsi_assets_name"; class FreeNASDriver extends ControllerZfsSshBaseDriver { /** @@ -524,7 +526,7 @@ class FreeNASDriver extends ControllerZfsSshBaseDriver { // create target let target = { iscsi_target_name: iscsiName, - iscsi_target_alias: "", + iscsi_target_alias: "", // TODO: allow template for this }; response = await httpClient.post("/services/iscsi/target", target); @@ -651,7 +653,7 @@ class FreeNASDriver extends ControllerZfsSshBaseDriver { } let extent = { - iscsi_target_extent_comment: "", + iscsi_target_extent_comment: "", // TODO: allow template for this value iscsi_target_extent_type: "Disk", // Disk/File, after save Disk becomes "ZVOL" iscsi_target_extent_name: iscsiName, iscsi_target_extent_insecure_tpc: extentInsecureTpc, @@ -849,7 +851,7 @@ class FreeNASDriver extends ControllerZfsSshBaseDriver { }); let extent = { - comment: "", + comment: "", // TODO: allow this to be templated type: "DISK", // Disk/File, after save Disk becomes "ZVOL" name: iscsiName, //iscsi_target_extent_naa: "0x3822690834aae6c5", @@ -974,6 +976,11 @@ class FreeNASDriver extends ControllerZfsSshBaseDriver { let iqn = basename + ":" + iscsiName; this.ctx.logger.info("FreeNAS iqn: " + iqn); + // store this off to make delete process more bullet proof + await zb.zfs.set(datasetName, { + [FREENAS_ISCSI_ASSETS_NAME_PROPERTY_NAME]: iscsiName, + }); + // iscsiadm -m discovery -t st -p 172.21.26.81 // iscsiadm -m node -T iqn.2011-03.lan.bitness.istgt:test -p bitness.lan -l @@ -1151,6 +1158,7 @@ class FreeNASDriver extends ControllerZfsSshBaseDriver { FREENAS_ISCSI_TARGET_ID_PROPERTY_NAME, FREENAS_ISCSI_EXTENT_ID_PROPERTY_NAME, FREENAS_ISCSI_TARGETTOEXTENT_ID_PROPERTY_NAME, + FREENAS_ISCSI_ASSETS_NAME_PROPERTY_NAME, ]); } catch (err) { if (err.toString().includes("dataset does not exist")) { @@ -1164,6 +1172,10 @@ class FreeNASDriver extends ControllerZfsSshBaseDriver { let targetId = properties[FREENAS_ISCSI_TARGET_ID_PROPERTY_NAME].value; let extentId = properties[FREENAS_ISCSI_EXTENT_ID_PROPERTY_NAME].value; + let iscsiName = + properties[FREENAS_ISCSI_ASSETS_NAME_PROPERTY_NAME].value; + let assetName; + let deleteAsset; switch (apiVersion) { case 1: @@ -1186,13 +1198,41 @@ class FreeNASDriver extends ControllerZfsSshBaseDriver { // assume is gone for now if ([404, 500].includes(response.statusCode)) { } else { - response = await httpClient.delete(endpoint); - if (![200, 204].includes(response.statusCode)) { - throw new GrpcError( - grpc.status.UNKNOWN, - `received error deleting iscsi target - extent: ${targetId} code: ${ - response.statusCode - } body: ${JSON.stringify(response.body)}` + deleteAsset = true; + assetName = null; + + // checking if set for backwards compatibility + if (zb.helpers.isPropertyValueSet(iscsiName)) { + switch (apiVersion) { + case 1: + assetName = response.body.iscsi_target_name; + break; + case 2: + assetName = response.body.name; + break; + } + + if (assetName != iscsiName) { + deleteAsset = false; + } + } + + if (deleteAsset) { + response = await httpClient.delete(endpoint); + if (![200, 
204].includes(response.statusCode)) { + throw new GrpcError( + grpc.status.UNKNOWN, + `received error deleting iscsi target - extent: ${targetId} code: ${ + response.statusCode + } body: ${JSON.stringify(response.body)}` + ); + } + } else { + this.ctx.logger.debug( + "not deleting iscsitarget asset as it appears ID %s has been re-used: zfs name - %s, iscsitarget name - %s", + targetId, + iscsiName, + assetName ); } } @@ -1210,13 +1250,41 @@ class FreeNASDriver extends ControllerZfsSshBaseDriver { // assume is gone for now if ([404, 500].includes(response.statusCode)) { } else { - response = await httpClient.delete(endpoint); - if (![200, 204].includes(response.statusCode)) { - throw new GrpcError( - grpc.status.UNKNOWN, - `received error deleting iscsi extent - extent: ${extentId} code: ${ - response.statusCode - } body: ${JSON.stringify(response.body)}` + deleteAsset = true; + assetName = null; + + // checking if set for backwards compatibility + if (zb.helpers.isPropertyValueSet(iscsiName)) { + switch (apiVersion) { + case 1: + assetName = response.body.iscsi_target_extent_name; + break; + case 2: + assetName = response.body.name; + break; + } + + if (assetName != iscsiName) { + deleteAsset = false; + } + } + + if (deleteAsset) { + response = await httpClient.delete(endpoint); + if (![200, 204].includes(response.statusCode)) { + throw new GrpcError( + grpc.status.UNKNOWN, + `received error deleting iscsi extent - extent: ${extentId} code: ${ + response.statusCode + } body: ${JSON.stringify(response.body)}` + ); + } + } else { + this.ctx.logger.debug( + "not deleting iscsiextent asset as it appears ID %s has been re-used: zfs name - %s, iscsiextent name - %s", + extentId, + iscsiName, + assetName ); } } From 6c79b32f9b6b6aefc30401c19af64dc59be36fea Mon Sep 17 00:00:00 2001 From: Travis Glenn Hansen Date: Fri, 27 Nov 2020 14:08:38 -0700 Subject: [PATCH 03/20] add a cancel previous runs action --- .github/workflows/main.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 2ee797f..a5beede 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -13,6 +13,10 @@ jobs: runs-on: ubuntu-latest steps: + - name: Cancel Previous Runs + uses: styfle/cancel-workflow-action@0.6.0 + with: + access_token: ${{ github.token }} - uses: actions/checkout@v2 - name: docker build run: | From c4a36750cdb2febf6941e47d81b2a374c589a230 Mon Sep 17 00:00:00 2001 From: Travis Glenn Hansen Date: Fri, 27 Nov 2020 14:19:13 -0700 Subject: [PATCH 04/20] many fixes, support auto-detection of truenas binary paths and apiVersion, fix config typo, better support for potential race conditions with deleting shares --- examples/freenas-iscsi.yaml | 3 + examples/freenas-nfs.yaml | 3 + examples/freenas-smb.yaml | 3 + examples/zfs-generic-iscsi.yaml | 2 +- src/driver/controller-zfs-generic/index.js | 24 +- src/driver/controller-zfs-ssh/index.js | 39 ++-- src/driver/freenas/http/index.js | 2 +- src/driver/freenas/index.js | 247 ++++++++++++++++++--- 8 files changed, 258 insertions(+), 65 deletions(-) diff --git a/examples/freenas-iscsi.yaml b/examples/freenas-iscsi.yaml index 99a83fd..979a9a0 100644 --- a/examples/freenas-iscsi.yaml +++ b/examples/freenas-iscsi.yaml @@ -12,6 +12,7 @@ httpConnection: password: allowInsecure: true # use apiVersion 2 for TrueNAS-12 and up (will work on 11.x in some scenarios as well) + # leave unset for auto-detection #apiVersion: 2 sshConnection: host: server address @@ -28,6 +29,8 @@ zfs: # the example below is 
useful for TrueNAS 12 #cli: # sudoEnabled: true + # + # leave paths unset for auto-detection # paths: # zfs: /usr/local/sbin/zfs # zpool: /usr/local/sbin/zpool diff --git a/examples/freenas-nfs.yaml b/examples/freenas-nfs.yaml index 141d305..0baa7b2 100644 --- a/examples/freenas-nfs.yaml +++ b/examples/freenas-nfs.yaml @@ -12,6 +12,7 @@ httpConnection: password: allowInsecure: true # use apiVersion 2 for TrueNAS-12 and up (will work on 11.x in some scenarios as well) + # leave unset for auto-detection #apiVersion: 2 sshConnection: host: server address @@ -28,6 +29,8 @@ zfs: # the example below is useful for TrueNAS 12 #cli: # sudoEnabled: true + # + # leave paths unset for auto-detection # paths: # zfs: /usr/local/sbin/zfs # zpool: /usr/local/sbin/zpool diff --git a/examples/freenas-smb.yaml b/examples/freenas-smb.yaml index 6704b11..0e20e89 100644 --- a/examples/freenas-smb.yaml +++ b/examples/freenas-smb.yaml @@ -12,6 +12,7 @@ httpConnection: password: allowInsecure: true # use apiVersion 2 for TrueNAS-12 and up (will work on 11.x in some scenarios as well) + # leave unset for auto-detection #apiVersion: 2 sshConnection: host: server address @@ -28,6 +29,8 @@ zfs: # the example below is useful for TrueNAS 12 #cli: # sudoEnabled: true + # + # leave paths unset for auto-detection # paths: # zfs: /usr/local/sbin/zfs # zpool: /usr/local/sbin/zpool diff --git a/examples/zfs-generic-iscsi.yaml b/examples/zfs-generic-iscsi.yaml index 5a74b55..3a647af 100644 --- a/examples/zfs-generic-iscsi.yaml +++ b/examples/zfs-generic-iscsi.yaml @@ -51,7 +51,7 @@ iscsi: # http://www.linux-iscsi.org/wiki/ISCSI # https://bugzilla.redhat.com/show_bug.cgi?id=1659195 # http://atodorov.org/blog/2015/04/07/how-to-configure-iscsi-target-on-red-hat-enterprise-linux-7/ - shareStragetyTargetCli: + shareStrategyTargetCli: #sudoEnabled: true basename: "iqn.2003-01.org.linux-iscsi.ubuntu-19.x8664" tpg: diff --git a/src/driver/controller-zfs-generic/index.js b/src/driver/controller-zfs-generic/index.js index d91bb23..2cb0810 100644 --- a/src/driver/controller-zfs-generic/index.js +++ b/src/driver/controller-zfs-generic/index.js @@ -26,7 +26,7 @@ class ControllerZfsGenericDriver extends ControllerZfsSshBaseDriver { * @param {*} datasetName */ async createShare(call, datasetName) { - const zb = this.getZetabyte(); + const zb = await this.getZetabyte(); const sshClient = this.getSshClient(); let properties; @@ -105,25 +105,25 @@ class ControllerZfsGenericDriver extends ControllerZfsSshBaseDriver { switch (this.options.iscsi.shareStrategy) { case "targetCli": - basename = this.options.iscsi.shareStragetyTargetCli.basename; + basename = this.options.iscsi.shareStrategyTargetCli.basename; let setAttributesText = ""; let setAuthText = ""; - if (this.options.iscsi.shareStragetyTargetCli.tpg) { - if (this.options.iscsi.shareStragetyTargetCli.tpg.attributes) { + if (this.options.iscsi.shareStrategyTargetCli.tpg) { + if (this.options.iscsi.shareStrategyTargetCli.tpg.attributes) { for (const attributeName in this.options.iscsi - .shareStragetyTargetCli.tpg.attributes) { + .shareStrategyTargetCli.tpg.attributes) { const attributeValue = this.options.iscsi - .shareStragetyTargetCli.tpg.attributes[attributeName]; + .shareStrategyTargetCli.tpg.attributes[attributeName]; setAttributesText += "\n"; setAttributesText += `set attribute ${attributeName}=${attributeValue}`; } } - if (this.options.iscsi.shareStragetyTargetCli.tpg.auth) { + if (this.options.iscsi.shareStrategyTargetCli.tpg.auth) { for (const attributeName in this.options.iscsi - 
.shareStragetyTargetCli.tpg.auth) { + .shareStrategyTargetCli.tpg.auth) { const attributeValue = this.options.iscsi - .shareStragetyTargetCli.tpg.auth[attributeName]; + .shareStrategyTargetCli.tpg.auth[attributeName]; setAttributesText += "\n"; setAttributesText += `set auth ${attributeName}=${attributeValue}`; } @@ -178,7 +178,7 @@ create /backstores/block/${iscsiName} } async deleteShare(call, datasetName) { - const zb = this.getZetabyte(); + const zb = await this.getZetabyte(); const sshClient = this.getSshClient(); let response; @@ -210,7 +210,7 @@ create /backstores/block/${iscsiName} iscsiName = iscsiName.toLowerCase(); switch (this.options.iscsi.shareStrategy) { case "targetCli": - basename = this.options.iscsi.shareStragetyTargetCli.basename; + basename = this.options.iscsi.shareStrategyTargetCli.basename; response = await this.targetCliCommand( ` cd /iscsi @@ -267,7 +267,7 @@ delete ${iscsiName} taregetCliCommand.push("|"); taregetCliCommand.push("targetcli"); - if (this.options.iscsi.shareStragetyTargetCli.sudoEnabled) { + if (this.options.iscsi.shareStrategyTargetCli.sudoEnabled) { command = "sudo"; args.unshift("sh"); } diff --git a/src/driver/controller-zfs-ssh/index.js b/src/driver/controller-zfs-ssh/index.js index d0fe409..c32d41d 100644 --- a/src/driver/controller-zfs-ssh/index.js +++ b/src/driver/controller-zfs-ssh/index.js @@ -122,7 +122,7 @@ class ControllerZfsSshBaseDriver extends CsiBaseDriver { }); } - getZetabyte() { + async getZetabyte() { const sshClient = this.getSshClient(); const options = {}; options.executor = new ZfsSshProcessManager(sshClient); @@ -130,6 +130,7 @@ class ControllerZfsSshBaseDriver extends CsiBaseDriver { if ( this.options.zfs.hasOwnProperty("cli") && + this.options.zfs.cli && this.options.zfs.cli.hasOwnProperty("paths") ) { options.paths = this.options.zfs.cli.paths; @@ -137,20 +138,26 @@ class ControllerZfsSshBaseDriver extends CsiBaseDriver { if ( this.options.zfs.hasOwnProperty("cli") && + this.options.zfs.cli && this.options.zfs.cli.hasOwnProperty("sudoEnabled") ) { options.sudo = this.getSudoEnabled(); } + if (typeof this.setZetabyteCustomOptions === "function") { + await this.setZetabyteCustomOptions(options); + } + return new Zetabyte(options); } getSudoEnabled() { - return this.options.zfs.cli.sudoEnabled === true; + return this.options.zfs.cli && this.options.zfs.cli.sudoEnabled === true; } - getSudoPath() { - return this.options.zfs.cli.paths.sudo || "/usr/bin/sudo"; + async getSudoPath() { + const zb = await this.getZetabyte(); + return zb.options.paths.sudo || "/usr/bin/sudo"; } getDatasetParentName() { @@ -175,7 +182,7 @@ class ControllerZfsSshBaseDriver extends CsiBaseDriver { } async removeSnapshotsFromDatatset(datasetName, options = {}) { - const zb = this.getZetabyte(); + const zb = await this.getZetabyte(); await zb.zfs.destroy(datasetName + "@%", options); } @@ -265,7 +272,7 @@ class ControllerZfsSshBaseDriver extends CsiBaseDriver { const driver = this; const driverZfsResourceType = this.getDriverZfsResourceType(); const sshClient = this.getSshClient(); - const zb = this.getZetabyte(); + const zb = await this.getZetabyte(); let datasetParentName = this.getVolumeParentDatasetName(); let snapshotParentDatasetName = this.getDetachedSnapshotParentDatasetName(); @@ -687,7 +694,7 @@ class ControllerZfsSshBaseDriver extends CsiBaseDriver { properties.mountpoint.value, ]); if (this.getSudoEnabled()) { - command = this.getSudoPath() + " " + command; + command = (await this.getSudoPath()) + " " + command; } 
driver.ctx.logger.verbose("set permission command: %s", command); @@ -710,7 +717,7 @@ class ControllerZfsSshBaseDriver extends CsiBaseDriver { properties.mountpoint.value, ]); if (this.getSudoEnabled()) { - command = this.getSudoPath() + " " + command; + command = (await this.getSudoPath()) + " " + command; } driver.ctx.logger.verbose("set ownership command: %s", command); @@ -727,7 +734,7 @@ class ControllerZfsSshBaseDriver extends CsiBaseDriver { properties.mountpoint.value, ]); if (this.getSudoEnabled()) { - command = this.getSudoPath() + " " + command; + command = (await this.getSudoPath()) + " " + command; } driver.ctx.logger.verbose("set acl command: %s", command); @@ -799,7 +806,7 @@ class ControllerZfsSshBaseDriver extends CsiBaseDriver { */ async DeleteVolume(call) { const driver = this; - const zb = this.getZetabyte(); + const zb = await this.getZetabyte(); let datasetParentName = this.getVolumeParentDatasetName(); let name = call.request.volume_id; @@ -904,7 +911,7 @@ class ControllerZfsSshBaseDriver extends CsiBaseDriver { async ControllerExpandVolume(call) { const driver = this; const driverZfsResourceType = this.getDriverZfsResourceType(); - const zb = this.getZetabyte(); + const zb = await this.getZetabyte(); let datasetParentName = this.getVolumeParentDatasetName(); let name = call.request.volume_id; @@ -1017,7 +1024,7 @@ class ControllerZfsSshBaseDriver extends CsiBaseDriver { */ async GetCapacity(call) { const driver = this; - const zb = this.getZetabyte(); + const zb = await this.getZetabyte(); let datasetParentName = this.getVolumeParentDatasetName(); @@ -1054,7 +1061,7 @@ class ControllerZfsSshBaseDriver extends CsiBaseDriver { async ListVolumes(call) { const driver = this; const driverZfsResourceType = this.getDriverZfsResourceType(); - const zb = this.getZetabyte(); + const zb = await this.getZetabyte(); let datasetParentName = this.getVolumeParentDatasetName(); let entries = []; @@ -1239,7 +1246,7 @@ class ControllerZfsSshBaseDriver extends CsiBaseDriver { async ListSnapshots(call) { const driver = this; const driverZfsResourceType = this.getDriverZfsResourceType(); - const zb = this.getZetabyte(); + const zb = await this.getZetabyte(); let entries = []; let entries_length = 0; @@ -1471,7 +1478,7 @@ class ControllerZfsSshBaseDriver extends CsiBaseDriver { async CreateSnapshot(call) { const driver = this; const driverZfsResourceType = this.getDriverZfsResourceType(); - const zb = this.getZetabyte(); + const zb = await this.getZetabyte(); let detachedSnapshot = false; try { @@ -1705,7 +1712,7 @@ class ControllerZfsSshBaseDriver extends CsiBaseDriver { */ async DeleteSnapshot(call) { const driver = this; - const zb = this.getZetabyte(); + const zb = await this.getZetabyte(); const snapshot_id = call.request.snapshot_id; diff --git a/src/driver/freenas/http/index.js b/src/driver/freenas/http/index.js index 6823c4e..44ffa33 100644 --- a/src/driver/freenas/http/index.js +++ b/src/driver/freenas/http/index.js @@ -4,7 +4,7 @@ const USER_AGENT = "democratic-csi-driver"; class Client { constructor(options = {}) { - this.options = options; + this.options = JSON.parse(JSON.stringify(options)); this.logger = console; // default to v1.0 for now diff --git a/src/driver/freenas/index.js b/src/driver/freenas/index.js index d9a030c..c246a4f 100644 --- a/src/driver/freenas/index.js +++ b/src/driver/freenas/index.js @@ -15,7 +15,6 @@ const FREENAS_ISCSI_TARGETTOEXTENT_ID_PROPERTY_NAME = "democratic-csi:freenas_iscsi_targettoextent_id"; const FREENAS_ISCSI_ASSETS_NAME_PROPERTY_NAME = 
"democratic-csi:freenas_iscsi_assets_name"; - class FreeNASDriver extends ControllerZfsSshBaseDriver { /** * cannot make this a storage class parameter as storage class/etc context is *not* sent @@ -36,9 +35,30 @@ class FreeNASDriver extends ControllerZfsSshBaseDriver { } } - getHttpClient() { + async setZetabyteCustomOptions(options) { + if (!options.hasOwnProperty("paths")) { + const majorMinor = await this.getSystemVersionMajorMinor(); + const isScale = await this.getIsScale(); + if (!isScale && Number(majorMinor) >= 12) { + options.paths = { + zfs: "/usr/local/sbin/zfs", + zpool: "/usr/local/sbin/zpool", + sudo: "/usr/local/bin/sudo", + chroot: "/usr/sbin/chroot", + }; + } + } + } + + async getHttpClient(autoDetectVersion = true) { const client = new HttpClient(this.options.httpConnection); client.logger = this.ctx.logger; + + if (autoDetectVersion && !!!this.options.httpConnection.apiVersion) { + const apiVersion = await this.getApiVersion(); + client.setApiVersion(apiVersion); + } + return client; } @@ -62,7 +82,7 @@ class FreeNASDriver extends ControllerZfsSshBaseDriver { if (!match || Object.keys(match).length < 1) { return; } - const httpClient = this.getHttpClient(); + const httpClient = await this.getHttpClient(); let target; let page = 0; @@ -126,9 +146,9 @@ class FreeNASDriver extends ControllerZfsSshBaseDriver { */ async createShare(call, datasetName) { const driverShareType = this.getDriverShareType(); - const httpClient = this.getHttpClient(); + const httpClient = await this.getHttpClient(); const apiVersion = httpClient.getApiVersion(); - const zb = this.getZetabyte(); + const zb = await this.getZetabyte(); let properties; let endpoint; @@ -1017,19 +1037,22 @@ class FreeNASDriver extends ControllerZfsSshBaseDriver { async deleteShare(call, datasetName) { const driverShareType = this.getDriverShareType(); - const httpClient = this.getHttpClient(); + const httpClient = await this.getHttpClient(); const apiVersion = httpClient.getApiVersion(); - const zb = this.getZetabyte(); + const zb = await this.getZetabyte(); let properties; let response; let endpoint; let shareId; + let deleteAsset; + let sharePaths; switch (driverShareType) { case "nfs": try { properties = await zb.zfs.get(datasetName, [ + "mountpoint", FREENAS_NFS_SHARE_PROPERTY_NAME, ]); } catch (err) { @@ -1063,18 +1086,33 @@ class FreeNASDriver extends ControllerZfsSshBaseDriver { // assume share is gone for now if ([404, 500].includes(response.statusCode)) { } else { - response = await httpClient.delete(endpoint); + switch (apiVersion) { + case 1: + sharePaths = response.body.nfs_paths; + break; + case 2: + sharePaths = response.body.paths; + break; + } - // returns a 500 if does not exist - // v1 = 204 - // v2 = 200 - if (![200, 204].includes(response.statusCode)) { - throw new GrpcError( - grpc.status.UNKNOWN, - `received error deleting nfs share - share: ${shareId} code: ${ - response.statusCode - } body: ${JSON.stringify(response.body)}` - ); + deleteAsset = sharePaths.some((value) => { + return value == properties.mountpoint.value; + }); + + if (deleteAsset) { + response = await httpClient.delete(endpoint); + + // returns a 500 if does not exist + // v1 = 204 + // v2 = 200 + if (![200, 204].includes(response.statusCode)) { + throw new GrpcError( + grpc.status.UNKNOWN, + `received error deleting nfs share - share: ${shareId} code: ${ + response.statusCode + } body: ${JSON.stringify(response.body)}` + ); + } } } break; @@ -1089,6 +1127,7 @@ class FreeNASDriver extends ControllerZfsSshBaseDriver { case "smb": 
try { properties = await zb.zfs.get(datasetName, [ + "mountpoint", FREENAS_SMB_SHARE_PROPERTY_NAME, ]); } catch (err) { @@ -1125,18 +1164,33 @@ class FreeNASDriver extends ControllerZfsSshBaseDriver { // assume share is gone for now if ([404, 500].includes(response.statusCode)) { } else { - response = await httpClient.delete(endpoint); + switch (apiVersion) { + case 1: + sharePaths = [response.body.cifs_path]; + break; + case 2: + sharePaths = [response.body.path]; + break; + } - // returns a 500 if does not exist - // v1 = 204 - // v2 = 200 - if (![200, 204].includes(response.statusCode)) { - throw new GrpcError( - grpc.status.UNKNOWN, - `received error deleting smb share - share: ${shareId} code: ${ - response.statusCode - } body: ${JSON.stringify(response.body)}` - ); + deleteAsset = sharePaths.some((value) => { + return value == properties.mountpoint.value; + }); + + if (deleteAsset) { + response = await httpClient.delete(endpoint); + + // returns a 500 if does not exist + // v1 = 204 + // v2 = 200 + if (![200, 204].includes(response.statusCode)) { + throw new GrpcError( + grpc.status.UNKNOWN, + `received error deleting smb share - share: ${shareId} code: ${ + response.statusCode + } body: ${JSON.stringify(response.body)}` + ); + } } } break; @@ -1175,7 +1229,6 @@ class FreeNASDriver extends ControllerZfsSshBaseDriver { let iscsiName = properties[FREENAS_ISCSI_ASSETS_NAME_PROPERTY_NAME].value; let assetName; - let deleteAsset; switch (apiVersion) { case 1: @@ -1310,10 +1363,18 @@ class FreeNASDriver extends ControllerZfsSshBaseDriver { switch (driverShareType) { case "iscsi": - this.ctx.logger.verbose("FreeNAS reloading ctld"); - await sshClient.exec( - sshClient.buildCommand("/etc/rc.d/ctld", ["reload"]) - ); + const isScale = this.getIsScale(); + if (isScale) { + this.ctx.logger.verbose("FreeNAS reloading scst"); + await sshClient.exec( + sshClient.buildCommand("systemctl", ["reload", "scst"]) + ); + } else { + this.ctx.logger.verbose("FreeNAS reloading ctld"); + await sshClient.exec( + sshClient.buildCommand("/etc/rc.d/ctld", ["reload"]) + ); + } break; } } @@ -1321,11 +1382,120 @@ class FreeNASDriver extends ControllerZfsSshBaseDriver { async getApiVersion() { const systemVersion = await this.getSystemVersion(); + if (systemVersion.v2) { + return 2; + } + return 1; } + async getIsFreeNAS() { + const systemVersion = await this.getSystemVersion(); + let version; + + if (systemVersion.v2) { + version = systemVersion.v2; + } else { + version = systemVersion.v1.fullversion; + } + + if (version.toLowerCase().includes("freenas")) { + return true; + } + + return false; + } + + async getIsTrueNAS() { + const systemVersion = await this.getSystemVersion(); + let version; + + if (systemVersion.v2) { + version = systemVersion.v2; + } else { + version = systemVersion.v1.fullversion; + } + + if (version.toLowerCase().includes("truenas")) { + return true; + } + + return false; + } + + async getIsScale() { + const systemVersion = await this.getSystemVersion(); + + if (systemVersion.v2 && systemVersion.v2.toLowerCase().includes("scale")) { + return true; + } + + return false; + } + + async getSystemVersionMajorMinor() { + const systemVersion = await this.getSystemVersion(); + let parts; + let parts_i; + let version; + + /* + systemVersion.v2 = "FreeNAS-11.2-U5"; + systemVersion.v2 = "TrueNAS-SCALE-20.11-MASTER-20201127-092915"; + systemVersion.v1 = { + fullversion: "FreeNAS-9.3-STABLE-201503200528", + fullversion: "FreeNAS-11.2-U5 (c129415c52)", + }; + + systemVersion.v2 = null; + */ + + if 
(systemVersion.v2) { + version = systemVersion.v2; + } else { + version = systemVersion.v1.fullversion; + } + + if (version) { + parts = version.split("-"); + parts_i = []; + parts.forEach((value) => { + let i = value.replace(/[^\d.]/g, ""); + if (i.length > 0) { + parts_i.push(i); + } + }); + + // join and resplit to deal with single elements which contain a decimal + parts_i = parts_i.join(".").split("."); + parts_i.splice(2); + return parts_i.join("."); + } + } + + async getSystemVersionMajor() { + const majorMinor = await this.getSystemVersionMajorMinor(); + return majorMinor.split(".")[0]; + } + + async setVersionInfoCache(versionInfo) { + const driver = this; + this.cache = this.cache || {}; + this.cache.versionInfo = versionInfo; + + // crude timeout + setTimeout(function () { + driver.cache.versionInfo = null; + }, 60 * 1000); + } + async getSystemVersion() { - const httpClient = this.getHttpClient(); + this.cache = this.cache || {}; + if (this.cache.versionInfo) { + return this.cache.versionInfo; + } + + const httpClient = await this.getHttpClient(false); const endpoint = "/system/version/"; let response; const startApiVersion = httpClient.getApiVersion(); @@ -1334,12 +1504,18 @@ class FreeNASDriver extends ControllerZfsSshBaseDriver { httpClient.setApiVersion(2); /** * FreeNAS-11.2-U5 + * TrueNAS-12.0-RELEASE + * TrueNAS-SCALE-20.11-MASTER-20201127-092915 */ try { response = await httpClient.get(endpoint); if (response.statusCode == 200) { versionInfo.v2 = response.body; } + + // return immediately to save on resources and silly requests + await this.setVersionInfoCache(versionInfo); + return versionInfo; } catch (e) {} httpClient.setApiVersion(1); @@ -1357,6 +1533,7 @@ class FreeNASDriver extends ControllerZfsSshBaseDriver { // reset apiVersion httpClient.setApiVersion(startApiVersion); + await this.setVersionInfoCache(versionInfo); return versionInfo; } } From 5076ca36649b5a44c744f84ee850acdd92bf0185 Mon Sep 17 00:00:00 2001 From: Travis Glenn Hansen Date: Sat, 28 Nov 2020 22:56:28 -0700 Subject: [PATCH 05/20] minor bug fixes, iscsi multipath/device-mapper support --- src/driver/index.js | 317 +++++++++++++++++++++++++++------------- src/utils/filesystem.js | 146 +++++++++++++++++- src/utils/iscsi.js | 53 ++++--- 3 files changed, 391 insertions(+), 125 deletions(-) diff --git a/src/driver/index.js b/src/driver/index.js index 056b7c6..388bb57 100644 --- a/src/driver/index.js +++ b/src/driver/index.js @@ -254,6 +254,7 @@ class CsiBaseDriver { * @param {*} call */ async NodeStageVolume(call) { + const driver = this; const mount = new Mount(); const filesystem = new Filesystem(); const iscsi = new ISCSI(); @@ -310,46 +311,141 @@ class CsiBaseDriver { device = `//${volume_context.server}/${volume_context.share}`; break; case "iscsi": - // create DB entry - // https://library.netapp.com/ecmdocs/ECMP1654943/html/GUID-8EC685B4-8CB6-40D8-A8D5-031A3899BCDC.html - // put these options in place to force targets managed by csi to be explicitly attached (in the case of unclearn shutdown etc) - let nodeDB = { - "node.startup": "manual", - }; - const nodeDBKeyPrefix = "node-db."; - for (const key in normalizedSecrets) { - if (key.startsWith(nodeDBKeyPrefix)) { - nodeDB[key.substr(nodeDBKeyPrefix.length)] = normalizedSecrets[key]; - } + let portals = []; + if (volume_context.portal) { + portals.push(volume_context.portal.trim()); } - await iscsi.iscsiadm.createNodeDBEntry( - volume_context.iqn, - volume_context.portal, - nodeDB - ); - // login - await iscsi.iscsiadm.login(volume_context.iqn, 
volume_context.portal); - // find device name - device = `/dev/disk/by-path/ip-${volume_context.portal}-iscsi-${volume_context.iqn}-lun-${volume_context.lun}`; + if (volume_context.portals) { + volume_context.portals.split(",").forEach((portal) => { + portals.push(portal.trim()); + }); + } - // can take some time for device to show up, loop for some period - result = await filesystem.pathExists(device); - let timer_start = Math.round(new Date().getTime() / 1000); - let timer_max = 30; - while (!result) { - await sleep(2000); + // ensure full portal value + portals = portals.map((value) => { + if (!value.includes(":")) { + value += ":3260"; + } + + return value.trim(); + }); + + // ensure unique entries only + portals = [...new Set(portals)]; + + let iscsiDevices = []; + + for (let portal of portals) { + // create DB entry + // https://library.netapp.com/ecmdocs/ECMP1654943/html/GUID-8EC685B4-8CB6-40D8-A8D5-031A3899BCDC.html + // put these options in place to force targets managed by csi to be explicitly attached (in the case of unclearn shutdown etc) + let nodeDB = { + "node.startup": "manual", + }; + const nodeDBKeyPrefix = "node-db."; + for (const key in normalizedSecrets) { + if (key.startsWith(nodeDBKeyPrefix)) { + nodeDB[key.substr(nodeDBKeyPrefix.length)] = + normalizedSecrets[key]; + } + } + await iscsi.iscsiadm.createNodeDBEntry( + volume_context.iqn, + portal, + nodeDB + ); + // login + await iscsi.iscsiadm.login(volume_context.iqn, portal); + + // find device name + device = `/dev/disk/by-path/ip-${portal}-iscsi-${volume_context.iqn}-lun-${volume_context.lun}`; + let deviceByPath = device; + + // can take some time for device to show up, loop for some period result = await filesystem.pathExists(device); - let current_time = Math.round(new Date().getTime() / 1000); - if (!result && current_time - timer_start > timer_max) { - throw new GrpcError( - grpc.status.UNKNOWN, - `hit timeout waiting for device node to appear: ${device}` + let timer_start = Math.round(new Date().getTime() / 1000); + let timer_max = 30; + let deviceCreated = result; + while (!result) { + await sleep(2000); + result = await filesystem.pathExists(device); + + if (result) { + deviceCreated = true; + break; + } + + let current_time = Math.round(new Date().getTime() / 1000); + if (!result && current_time - timer_start > timer_max) { + driver.ctx.logger.warn( + `hit timeout waiting for device node to appear: ${device}` + ); + break; + } + } + + if (deviceCreated) { + device = await filesystem.realpath(device); + iscsiDevices.push(device); + + driver.ctx.logger.info( + `successfully logged into portal ${portal} and created device ${deviceByPath} with realpath ${device}` ); } } - device = await filesystem.realpath(device); + // filter duplicates + iscsiDevices = iscsiDevices.filter((value, index, self) => { + return self.indexOf(value) === index; + }); + + // only throw an error if we were not able to attach to *any* devices + if (iscsiDevices.length < 1) { + throw new GrpcError( + grpc.status.UNKNOWN, + `unable to attach any iscsi devices` + ); + } + + if (iscsiDevices.length != portals.length) { + driver.ctx.logger.warn( + `failed to attach all iscsi devices/targets/portals` + ); + + // TODO: allow a parameter to control this behavior in some form + if (false) { + throw new GrpcError( + grpc.status.UNKNOWN, + `unable to attach all iscsi devices` + ); + } + } + + // compare all device-mapper slaves with the newly created devices + // if any of the new devices are device-mapper slaves treat this as a + // multipath 
scenario + let allDeviceMapperSlaves = await filesystem.getAllDeviceMapperSlaveDevices(); + let commonDevices = allDeviceMapperSlaves.filter((value) => + iscsiDevices.includes(value) + ); + + const useMultipath = portals.length > 1 || commonDevices.length > 0; + + // discover multipath device to use + if (useMultipath) { + device = await filesystem.getDeviceMapperDeviceFromSlaves( + iscsiDevices, + false + ); + + if (!device) { + throw new GrpcError( + grpc.status.UNKNOWN, + `failed to discover multipath device` + ); + } + } break; default: throw new GrpcError( @@ -463,6 +559,7 @@ class CsiBaseDriver { const iscsi = new ISCSI(); let result; let is_block = false; + let is_device_mapper = false; let block_device_info; let access_type = "mount"; @@ -505,78 +602,100 @@ class CsiBaseDriver { } if (is_block) { - if (block_device_info.tran == "iscsi") { - // figure out which iscsi session this belongs to and logout - // scan /dev/disk/by-path/ip-*? - // device = `/dev/disk/by-path/ip-${volume_context.portal}-iscsi-${volume_context.iqn}-lun-${volume_context.lun}`; - // parse output from `iscsiadm -m session -P 3` - let sessions = await iscsi.iscsiadm.getSessionsDetails(); - for (let i = 0; i < sessions.length; i++) { - let session = sessions[i]; - let is_attached_to_session = false; + let realBlockDeviceInfos = []; + // detect if is a multipath device + is_device_mapper = await filesystem.isDeviceMapperDevice( + block_device_info.path + ); - if ( - session.attached_scsi_devices && - session.attached_scsi_devices.host && - session.attached_scsi_devices.host.devices - ) { - is_attached_to_session = session.attached_scsi_devices.host.devices.some( - (device) => { - if (device.attached_scsi_disk == block_device_info.name) { - return true; + if (is_device_mapper) { + let realBlockDevices = await filesystem.getDeviceMapperDeviceSlaves( + block_device_info.path + ); + for (const realBlockDevice of realBlockDevices) { + realBlockDeviceInfos.push( + await filesystem.getBlockDevice(realBlockDevice) + ); + } + } else { + realBlockDeviceInfos = [block_device_info]; + } + + // TODO: this could be made async to detach all simultaneously + for (const block_device_info_i of realBlockDeviceInfos) { + if (block_device_info_i.tran == "iscsi") { + // figure out which iscsi session this belongs to and logout + // scan /dev/disk/by-path/ip-*? 
+ // device = `/dev/disk/by-path/ip-${volume_context.portal}-iscsi-${volume_context.iqn}-lun-${volume_context.lun}`; + // parse output from `iscsiadm -m session -P 3` + let sessions = await iscsi.iscsiadm.getSessionsDetails(); + for (let i = 0; i < sessions.length; i++) { + let session = sessions[i]; + let is_attached_to_session = false; + + if ( + session.attached_scsi_devices && + session.attached_scsi_devices.host && + session.attached_scsi_devices.host.devices + ) { + is_attached_to_session = session.attached_scsi_devices.host.devices.some( + (device) => { + if (device.attached_scsi_disk == block_device_info_i.name) { + return true; + } + return false; } - return false; - } - ); - } - - if (is_attached_to_session) { - let timer_start; - let timer_max; - - timer_start = Math.round(new Date().getTime() / 1000); - timer_max = 30; - let loggedOut = false; - while (!loggedOut) { - try { - await iscsi.iscsiadm.logout(session.target, [ - session.persistent_portal, - ]); - loggedOut = true; - } catch (err) { - await sleep(2000); - let current_time = Math.round(new Date().getTime() / 1000); - if (current_time - timer_start > timer_max) { - // not throwing error for now as future invocations would not enter code path anyhow - loggedOut = true; - //throw new GrpcError( - // grpc.status.UNKNOWN, - // `hit timeout trying to logout of iscsi target: ${session.persistent_portal}` - //); - } - } + ); } - timer_start = Math.round(new Date().getTime() / 1000); - timer_max = 30; - let deletedEntry = false; - while (!deletedEntry) { - try { - await iscsi.iscsiadm.deleteNodeDBEntry( - session.target, - session.persistent_portal - ); - deletedEntry = true; - } catch (err) { - await sleep(2000); - let current_time = Math.round(new Date().getTime() / 1000); - if (current_time - timer_start > timer_max) { - // not throwing error for now as future invocations would not enter code path anyhow + if (is_attached_to_session) { + let timer_start; + let timer_max; + + timer_start = Math.round(new Date().getTime() / 1000); + timer_max = 30; + let loggedOut = false; + while (!loggedOut) { + try { + await iscsi.iscsiadm.logout(session.target, [ + session.persistent_portal, + ]); + loggedOut = true; + } catch (err) { + await sleep(2000); + let current_time = Math.round(new Date().getTime() / 1000); + if (current_time - timer_start > timer_max) { + // not throwing error for now as future invocations would not enter code path anyhow + loggedOut = true; + //throw new GrpcError( + // grpc.status.UNKNOWN, + // `hit timeout trying to logout of iscsi target: ${session.persistent_portal}` + //); + } + } + } + + timer_start = Math.round(new Date().getTime() / 1000); + timer_max = 30; + let deletedEntry = false; + while (!deletedEntry) { + try { + await iscsi.iscsiadm.deleteNodeDBEntry( + session.target, + session.persistent_portal + ); deletedEntry = true; - //throw new GrpcError( - // grpc.status.UNKNOWN, - // `hit timeout trying to delete iscsi node DB entry: ${session.target}, ${session.persistent_portal}` - //); + } catch (err) { + await sleep(2000); + let current_time = Math.round(new Date().getTime() / 1000); + if (current_time - timer_start > timer_max) { + // not throwing error for now as future invocations would not enter code path anyhow + deletedEntry = true; + //throw new GrpcError( + // grpc.status.UNKNOWN, + // `hit timeout trying to delete iscsi node DB entry: ${session.target}, ${session.persistent_portal}` + //); + } } } } diff --git a/src/utils/filesystem.js b/src/utils/filesystem.js index fb03909..4e59b28 100644 
--- a/src/utils/filesystem.js +++ b/src/utils/filesystem.js @@ -48,14 +48,156 @@ class Filesystem { const device_path = await filesystem.realpath(device); const blockdevices = await filesystem.getAllBlockDevices(); - return blockdevices.some((i) => { - if (i.path == device_path) { + return blockdevices.some(async (i) => { + if ((await filesystem.realpath(i.path)) == device_path) { return true; } return false; }); } + /** + * Attempt to discover if the device is a device-mapper device + * + * @param {*} device + */ + async isDeviceMapperDevice(device) { + const filesystem = this; + const isBlock = await filesystem.isBlockDevice(device); + + if (!isBlock) { + return false; + } + + device = await filesystem.realpath(device); + + return device.includes("dm-"); + } + + async isDeviceMapperSlaveDevice(device) { + const filesystem = this; + device = await filesystem.realpath(device); + } + + /** + * Get all device-mapper devices (ie: dm-0, dm-1, dm-N...) + */ + async getAllDeviceMapperDevices() { + const filesystem = this; + let result; + let devices = []; + let args = [ + "-c", + 'for file in $(ls -la /dev/mapper/* | grep "\\->" | grep -oP "\\-> .+" | grep -oP " .+"); do echo $(F=$(echo $file | grep -oP "[a-z0-9-]+");echo $F":"$(ls "/sys/block/${F}/slaves/");); done;', + ]; + + try { + result = await filesystem.exec("sh", args); + + for (const dm of result.stdout.trim().split("\n")) { + devices.push("/dev/" + dm.split(":")[0].trim()); + } + return devices; + } catch (err) { + throw err; + } + } + + async getAllDeviceMapperSlaveDevices() { + const filesystem = this; + let result; + let args = [ + "-c", + 'for file in $(ls -la /dev/mapper/* | grep "\\->" | grep -oP "\\-> .+" | grep -oP " .+"); do echo $(F=$(echo $file | grep -oP "[a-z0-9-]+");echo $F":"$(ls "/sys/block/${F}/slaves/");); done;', + ]; + let slaves = []; + + try { + result = await filesystem.exec("sh", args); + + for (const dm of result.stdout.trim().split("\n")) { + const realDevices = dm + .split(":")[1] + .split(" ") + .map((value) => { + return "/dev/" + value.trim(); + }); + slaves.push(...realDevices); + } + return slaves; + } catch (err) { + throw err; + } + } + + /** + * Get all slave devices connected to a device-mapper device + * + * @param {*} device + */ + async getDeviceMapperDeviceSlaves(device) { + const filesystem = this; + device = await filesystem.realpath(device); + let device_info = await filesystem.getBlockDevice(device); + const slaves = []; + + let result; + let args = [`/sys/block/${device_info.kname}/slaves/`]; + + try { + result = await filesystem.exec("ls", args); + + for (const entry of result.stdout.split("\n")) { + if (entry.trim().length < 1) { + continue; + } + + slaves.push("/dev/" + entry.trim()); + } + return slaves; + } catch (err) { + throw err; + } + } + + async getDeviceMapperDeviceFromSlaves(slaves, matchAll = true) { + const filesystem = this; + let result; + + // get mapping of dm devices to real devices + let args = [ + "-c", + 'for file in $(ls -la /dev/mapper/* | grep "\\->" | grep -oP "\\-> .+" | grep -oP " .+"); do echo $(F=$(echo $file | grep -oP "[a-z0-9-]+");echo $F":"$(ls "/sys/block/${F}/slaves/");); done;', + ]; + + result = await filesystem.exec("sh", args); + + for (const dm of result.stdout.trim().split("\n")) { + const dmDevice = "/dev/" + dm.split(":")[0].trim(); + const realDevices = dm + .split(":")[1] + .split(" ") + .map((value) => { + return "/dev/" + value.trim(); + }); + const intersectDevices = slaves.filter((value) => + realDevices.includes(value) + ); + + if 
(matchAll === false && intersectDevices.length > 0) { + return dmDevice; + } + + // if all 3 have the same elements we have a winner + if ( + intersectDevices.length == realDevices.length && + realDevices.length == slaves.length + ) { + return dmDevice; + } + } + } + /** * create symlink * diff --git a/src/utils/iscsi.js b/src/utils/iscsi.js index dbca409..36216f7 100644 --- a/src/utils/iscsi.js +++ b/src/utils/iscsi.js @@ -25,7 +25,7 @@ class ISCSI { if (!options.executor) { options.executor = { - spawn: cp.spawn + spawn: cp.spawn, }; } @@ -47,7 +47,7 @@ class ISCSI { const entries = result.stdout.trim().split("\n"); const interfaces = []; let fields; - entries.forEach(entry => { + entries.forEach((entry) => { fields = entry.split(" "); interfaces.push({ iface_name: fields[0], @@ -55,7 +55,7 @@ class ISCSI { hwaddress: getIscsiValue(fields[1].split(",")[1]), ipaddress: getIscsiValue(fields[1].split(",")[2]), net_ifacename: getIscsiValue(fields[1].split(",")[3]), - initiatorname: getIscsiValue(fields[1].split(",")[4]) + initiatorname: getIscsiValue(fields[1].split(",")[4]), }); }); @@ -75,7 +75,7 @@ class ISCSI { const entries = result.stdout.trim().split("\n"); const i = {}; let fields, key, value; - entries.forEach(entry => { + entries.forEach((entry) => { if (entry.startsWith("#")) return; fields = entry.split("="); key = fields[0].trim(); @@ -103,7 +103,7 @@ class ISCSI { "-p", portal, "-o", - "new" + "new", ]); await iscsi.exec(options.paths.iscsiadm, args); for (let attribute in attributes) { @@ -120,7 +120,7 @@ class ISCSI { "--name", attribute, "--value", - attributes[attribute] + attributes[attribute], ]); await iscsi.exec(options.paths.iscsiadm, args); } @@ -142,7 +142,7 @@ class ISCSI { "-p", portal, "-o", - "delete" + "delete", ]); await iscsi.exec(options.paths.iscsiadm, args); }, @@ -174,7 +174,7 @@ class ISCSI { const entries = result.stdout.trim().split("\n"); const sessions = []; let fields; - entries.forEach(entry => { + entries.forEach((entry) => { fields = entry.split(" "); sessions.push({ protocol: entry.split(":")[0], @@ -182,7 +182,7 @@ class ISCSI { portal: fields[2].split(",")[0], target_portal_group_tag: fields[2].split(",")[1], iqn: fields[3].split(":")[0], - target: fields[3].split(":")[1] + target: fields[3].split(":")[1], }); }); @@ -212,6 +212,7 @@ class ISCSI { return []; } + let currentTarget; let sessionGroups = []; let currentSession = []; @@ -221,13 +222,21 @@ class ISCSI { entries.shift(); entries.shift(); + // this should break up the lines into groups of lines + // where each group is the full details of a single session + // note that the output of the command bundles/groups all sessions + // by target so extra logic is needed to hanle that + // alternatively we could get all sessions using getSessions() + // and then invoke `iscsiadm -m session -P 3 -r ` in a loop for (let i = 0; i < entries.length; i++) { let entry = entries[i]; if (entry.startsWith("Target:")) { + currentTarget = entry; + } else if (entry.trim().startsWith("Current Portal:")) { if (currentSession.length > 0) { sessionGroups.push(currentSession); } - currentSession = [entry]; + currentSession = [currentTarget, entry]; } else { currentSession.push(entry); } @@ -261,11 +270,7 @@ class ISCSI { .trim() .replace(/ /g, "_") .replace(/\W/g, ""); - let value = line - .split(":") - .slice(1) - .join(":") - .trim(); + let value = line.split(":").slice(1).join(":").trim(); if (currentSection) { session[currentSection] = session[currentSection] || {}; @@ -282,7 +287,7 @@ class ISCSI { 
.split(":") .slice(1) .join(":") - .trim() + .trim(), }; while ( sessionLines[j + 1] && @@ -308,7 +313,7 @@ class ISCSI { .split(":") .slice(1) .join(":") - .trim() + .trim(), }); } @@ -322,7 +327,7 @@ class ISCSI { key = key.charAt(0).toLowerCase() + key.slice(1); key = key.replace( /[A-Z]/g, - letter => `_${letter.toLowerCase()}` + (letter) => `_${letter.toLowerCase()}` ); break; } @@ -367,12 +372,12 @@ class ISCSI { const entries = result.stdout.trim().split("\n"); const targets = []; - entries.forEach(entry => { + entries.forEach((entry) => { targets.push({ portal: entry.split(",")[0], target_portal_group_tag: entry.split(" ")[0].split(",")[1], iqn: entry.split(" ")[1].split(":")[0], - target: entry.split(" ")[1].split(":")[1] + target: entry.split(" ")[1].split(":")[1], }); }); @@ -432,7 +437,7 @@ class ISCSI { } return true; - } + }, }; } @@ -460,15 +465,15 @@ class ISCSI { } return new Promise((resolve, reject) => { - child.stdout.on("data", function(data) { + child.stdout.on("data", function (data) { stdout = stdout + data; }); - child.stderr.on("data", function(data) { + child.stderr.on("data", function (data) { stderr = stderr + data; }); - child.on("close", function(code) { + child.on("close", function (code) { const result = { code, stdout, stderr }; if (timeout) { clearTimeout(timeout); From da6ff1b950967e62717ec237ffcaf616e4554ea2 Mon Sep 17 00:00:00 2001 From: Travis Glenn Hansen Date: Sat, 28 Nov 2020 23:07:55 -0700 Subject: [PATCH 06/20] minor fix for empty values --- src/utils/filesystem.js | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/src/utils/filesystem.js b/src/utils/filesystem.js index 4e59b28..e7e84ef 100644 --- a/src/utils/filesystem.js +++ b/src/utils/filesystem.js @@ -95,6 +95,9 @@ class Filesystem { result = await filesystem.exec("sh", args); for (const dm of result.stdout.trim().split("\n")) { + if (dm.length < 1) { + continue; + } devices.push("/dev/" + dm.split(":")[0].trim()); } return devices; @@ -116,6 +119,9 @@ class Filesystem { result = await filesystem.exec("sh", args); for (const dm of result.stdout.trim().split("\n")) { + if (dm.length < 1) { + continue; + } const realDevices = dm .split(":")[1] .split(" ") @@ -173,6 +179,9 @@ class Filesystem { result = await filesystem.exec("sh", args); for (const dm of result.stdout.trim().split("\n")) { + if (dm.length < 1) { + continue; + } const dmDevice = "/dev/" + dm.split(":")[0].trim(); const realDevices = dm .split(":")[1] From a519c3fff4ed8b4d80ffd4f6fd5e019926e4a792 Mon Sep 17 00:00:00 2001 From: Travis Glenn Hansen Date: Sun, 29 Nov 2020 10:02:53 -0700 Subject: [PATCH 07/20] resize support for device-mapper/multipath --- Dockerfile | 3 +++ docker/multipath | 3 +++ src/driver/index.js | 24 +++++++++++++++++++++++- src/utils/filesystem.js | 15 +++++++++++---- 4 files changed, 40 insertions(+), 5 deletions(-) create mode 100644 docker/multipath diff --git a/Dockerfile b/Dockerfile index 02f0b7e..ff5734f 100644 --- a/Dockerfile +++ b/Dockerfile @@ -38,6 +38,9 @@ RUN apt-get update && \ ADD docker/iscsiadm /usr/local/sbin RUN chmod +x /usr/local/sbin/iscsiadm +ADD docker/multipath /usr/local/sbin +RUN chmod +x /usr/local/sbin/multipath + # Run as a non-root user RUN useradd --create-home csi \ && mkdir /home/csi/app \ diff --git a/docker/multipath b/docker/multipath new file mode 100644 index 0000000..0a95bc8 --- /dev/null +++ b/docker/multipath @@ -0,0 +1,3 @@ +#!/bin/bash + +chroot /host /usr/bin/env -i PATH="/sbin:/bin:/usr/sbin:/usr/bin" multipath "${@:1}" diff --git a/src/driver/index.js 
b/src/driver/index.js index 388bb57..a5ad8fa 100644 --- a/src/driver/index.js +++ b/src/driver/index.js @@ -395,6 +395,10 @@ class CsiBaseDriver { } } + // let things settle + // this will help in dm scenarios + await sleep(2000); + // filter duplicates iscsiDevices = iscsiDevices.filter((value, index, self) => { return self.indexOf(value) === index; @@ -936,6 +940,7 @@ class CsiBaseDriver { let is_block = false; let is_formatted; let fs_type; + let is_device_mapper = false; const volume_id = call.request.volume_id; const volume_path = call.request.volume_path; @@ -972,7 +977,24 @@ class CsiBaseDriver { } if (is_block) { - await filesystem.rescanDevice(device); + let rescan_devices = []; + // detect if is a multipath device + is_device_mapper = await filesystem.isDeviceMapperDevice(device); + if (is_device_mapper) { + // NOTE: want to make sure we scan the dm device *after* all the underlying slaves + rescan_devices = await filesystem.getDeviceMapperDeviceSlaves(device); + } + + rescan_devices.push(device); + + for (let sdevice of rescan_devices) { + await filesystem.rescanDevice(sdevice); + } + + // let things settle + // it appears the dm devices can take a second to figure things out + await sleep(2000); + if (is_formatted && access_type == "mount") { fs_info = await filesystem.getDeviceFilesystemInfo(device); fs_type = fs_info.type; diff --git a/src/utils/filesystem.js b/src/utils/filesystem.js index e7e84ef..7bb3684 100644 --- a/src/utils/filesystem.js +++ b/src/utils/filesystem.js @@ -415,12 +415,19 @@ class Filesystem { ); } + let is_device_mapper_device = await filesystem.isDeviceMapperDevice(device); result = await filesystem.realpath(device); - device_name = result.split("/").pop(); - // echo 1 > /sys/block/sdb/device/rescan - const sys_file = `/sys/block/${device_name}/device/rescan`; - fs.writeFileSync(sys_file, "1"); + if (is_device_mapper_device) { + // multipath -r /dev/dm-0 + result = await filesystem.exec("multipath", ["-r", device]); + } else { + device_name = result.split("/").pop(); + + // echo 1 > /sys/block/sdb/device/rescan + const sys_file = `/sys/block/${device_name}/device/rescan`; + fs.writeFileSync(sys_file, "1"); + } } /** From 06a54f9c7c1caf88ee3520d58dd1947e80a02570 Mon Sep 17 00:00:00 2001 From: Travis Glenn Hansen Date: Sun, 29 Nov 2020 13:06:06 -0700 Subject: [PATCH 08/20] docker dependency cleanup --- .dockerignore | 2 ++ Dockerfile | 9 ++------- 2 files changed, 4 insertions(+), 7 deletions(-) diff --git a/.dockerignore b/.dockerignore index 5479770..22353b8 100644 --- a/.dockerignore +++ b/.dockerignore @@ -2,3 +2,5 @@ chart dev examples node_modules +Dockerfile* +TODO.md diff --git a/Dockerfile b/Dockerfile index ff5734f..cf4db53 100644 --- a/Dockerfile +++ b/Dockerfile @@ -8,7 +8,7 @@ ARG BUILDPLATFORM RUN apt-get update && apt-get install -y locales && rm -rf /var/lib/apt/lists/* \ && localedef -i en_US -c -f UTF-8 -A /usr/share/locale/locale.alias en_US.UTF-8 -ENV LANG=en_US.utf8 NODE_VERSION=v12.15.0 +ENV LANG=en_US.utf8 NODE_VERSION=v12.20.0 RUN echo "I am running on $BUILDPLATFORM, building for $TARGETPLATFORM" @@ -20,7 +20,7 @@ ENV PATH=/usr/local/lib/nodejs/bin:$PATH # node service requirements RUN apt-get update && \ - apt-get install -y xfsprogs fatresize dosfstools open-iscsi lsscsi sg3-utils multipath-tools scsitools nfs-common cifs-utils sudo && \ + apt-get install -y e2fsprogs xfsprogs fatresize dosfstools nfs-common cifs-utils sudo && \ rm -rf /var/lib/apt/lists/* # controller requirements @@ -55,10 +55,5 @@ COPY --chown=csi:csi . . 
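For reference, the device-mapper resize ordering introduced above can be condensed as follows. This is a sketch only, assuming filesystem is an instance of the Filesystem helper from src/utils/filesystem.js; the surrounding block/mount and formatted-filesystem handling is omitted.

// Rescan the underlying slave paths before the device-mapper device itself
// so the multipath map picks up the new size, then allow a short settle
// period (the 2000ms value mirrors the patch and is not tuned).
async function rescanForResize(filesystem, device) {
  let rescanDevices = [];

  if (await filesystem.isDeviceMapperDevice(device)) {
    // sdX slaves must be rescanned before the dm-N device
    rescanDevices = await filesystem.getDeviceMapperDeviceSlaves(device);
  }
  rescanDevices.push(device);

  for (const sdevice of rescanDevices) {
    // dm devices end up invoking `multipath -r`, plain devices write
    // 1 to /sys/block/<name>/device/rescan
    await filesystem.rescanDevice(sdevice);
  }

  // let the dm layer settle before probing filesystem info
  await new Promise((resolve) => setTimeout(resolve, 2000));
}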
USER root -# remove build deps -#RUN apt-get update && \ -# apt-get purge -y python make gcc g++ && \ -# rm -rf /var/lib/apt/lists/* - EXPOSE 50051 ENTRYPOINT [ "bin/democratic-csi" ] From 8cde1a19797b4f09dc272eaaf4170e5d0dab48bc Mon Sep 17 00:00:00 2001 From: Travis Glenn Hansen Date: Sun, 29 Nov 2020 13:09:34 -0700 Subject: [PATCH 09/20] multi-stage build to shrink docker images --- Dockerfile | 83 ++++++++++++++++++++++++++++++---------------- Dockerfile.unified | 59 ++++++++++++++++++++++++++++++++ 2 files changed, 113 insertions(+), 29 deletions(-) create mode 100644 Dockerfile.unified diff --git a/Dockerfile b/Dockerfile index cf4db53..faa14b5 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,16 +1,21 @@ -FROM debian:10-slim +FROM debian:10-slim AS build -ENV DEBIAN_FRONTEND=noninteractive +#FROM --platform=$BUILDPLATFORM debian:10-slim AS build ARG TARGETPLATFORM ARG BUILDPLATFORM +RUN echo "I am running build on $BUILDPLATFORM, building for $TARGETPLATFORM" + RUN apt-get update && apt-get install -y locales && rm -rf /var/lib/apt/lists/* \ && localedef -i en_US -c -f UTF-8 -A /usr/share/locale/locale.alias en_US.UTF-8 -ENV LANG=en_US.utf8 NODE_VERSION=v12.20.0 +ENV LANG=en_US.utf8 +ENV NODE_VERSION=v12.20.0 +#ENV NODE_VERSION=v14.15.1 -RUN echo "I am running on $BUILDPLATFORM, building for $TARGETPLATFORM" +# install build deps +RUN apt-get update && apt-get install -y python make gcc g++ # install node RUN apt-get update && apt-get install -y wget xz-utils @@ -18,29 +23,6 @@ ADD docker/node-installer.sh /usr/local/sbin RUN chmod +x /usr/local/sbin/node-installer.sh && node-installer.sh ENV PATH=/usr/local/lib/nodejs/bin:$PATH -# node service requirements -RUN apt-get update && \ - apt-get install -y e2fsprogs xfsprogs fatresize dosfstools nfs-common cifs-utils sudo && \ - rm -rf /var/lib/apt/lists/* - -# controller requirements -RUN apt-get update && \ - apt-get install -y ansible && \ - rm -rf /var/lib/apt/lists/* - -# npm requirements -# gcc and g++ required by grpc-usd until proper upstream support -RUN apt-get update && \ - apt-get install -y python make gcc g++ && \ - rm -rf /var/lib/apt/lists/* - -# install wrappers -ADD docker/iscsiadm /usr/local/sbin -RUN chmod +x /usr/local/sbin/iscsiadm - -ADD docker/multipath /usr/local/sbin -RUN chmod +x /usr/local/sbin/multipath - # Run as a non-root user RUN useradd --create-home csi \ && mkdir /home/csi/app \ @@ -50,10 +32,53 @@ USER csi COPY package*.json ./ RUN npm install - COPY --chown=csi:csi . . 
+RUN rm -rf docker -USER root + +###################### +# actual image +###################### +FROM debian:10-slim + +ARG TARGETPLATFORM +ARG BUILDPLATFORM + +RUN echo "I am running on final $BUILDPLATFORM, building for $TARGETPLATFORM" + +RUN apt-get update && apt-get install -y locales && rm -rf /var/lib/apt/lists/* \ + && localedef -i en_US -c -f UTF-8 -A /usr/share/locale/locale.alias en_US.UTF-8 + +ENV LANG=en_US.utf8 + +# install node +ENV PATH=/usr/local/lib/nodejs/bin:$PATH +COPY --from=build /usr/local/lib/nodejs /usr/local/lib/nodejs + +# node service requirements +RUN apt-get update && \ + apt-get install -y e2fsprogs xfsprogs fatresize dosfstools nfs-common cifs-utils sudo && \ + rm -rf /var/lib/apt/lists/* + +# controller requirements +#RUN apt-get update && \ +# apt-get install -y ansible && \ +# rm -rf /var/lib/apt/lists/* + +# install wrappers +ADD docker/iscsiadm /usr/local/sbin +RUN chmod +x /usr/local/sbin/iscsiadm + +ADD docker/multipath /usr/local/sbin +RUN chmod +x /usr/local/sbin/multipath + +# Run as a non-root user +RUN useradd --create-home csi \ + && chown -R csi: /home/csi + +COPY --from=build --chown=csi:csi /home/csi/app /home/csi/app + +WORKDIR /home/csi/app EXPOSE 50051 ENTRYPOINT [ "bin/democratic-csi" ] diff --git a/Dockerfile.unified b/Dockerfile.unified new file mode 100644 index 0000000..cf4db53 --- /dev/null +++ b/Dockerfile.unified @@ -0,0 +1,59 @@ +FROM debian:10-slim + +ENV DEBIAN_FRONTEND=noninteractive + +ARG TARGETPLATFORM +ARG BUILDPLATFORM + +RUN apt-get update && apt-get install -y locales && rm -rf /var/lib/apt/lists/* \ + && localedef -i en_US -c -f UTF-8 -A /usr/share/locale/locale.alias en_US.UTF-8 + +ENV LANG=en_US.utf8 NODE_VERSION=v12.20.0 + +RUN echo "I am running on $BUILDPLATFORM, building for $TARGETPLATFORM" + +# install node +RUN apt-get update && apt-get install -y wget xz-utils +ADD docker/node-installer.sh /usr/local/sbin +RUN chmod +x /usr/local/sbin/node-installer.sh && node-installer.sh +ENV PATH=/usr/local/lib/nodejs/bin:$PATH + +# node service requirements +RUN apt-get update && \ + apt-get install -y e2fsprogs xfsprogs fatresize dosfstools nfs-common cifs-utils sudo && \ + rm -rf /var/lib/apt/lists/* + +# controller requirements +RUN apt-get update && \ + apt-get install -y ansible && \ + rm -rf /var/lib/apt/lists/* + +# npm requirements +# gcc and g++ required by grpc-usd until proper upstream support +RUN apt-get update && \ + apt-get install -y python make gcc g++ && \ + rm -rf /var/lib/apt/lists/* + +# install wrappers +ADD docker/iscsiadm /usr/local/sbin +RUN chmod +x /usr/local/sbin/iscsiadm + +ADD docker/multipath /usr/local/sbin +RUN chmod +x /usr/local/sbin/multipath + +# Run as a non-root user +RUN useradd --create-home csi \ + && mkdir /home/csi/app \ + && chown -R csi: /home/csi +WORKDIR /home/csi/app +USER csi + +COPY package*.json ./ +RUN npm install + +COPY --chown=csi:csi . . 
+ +USER root + +EXPOSE 50051 +ENTRYPOINT [ "bin/democratic-csi" ] From 001301d53eddacce330ccded17c13d9475a3ebd9 Mon Sep 17 00:00:00 2001 From: Travis Glenn Hansen Date: Sun, 29 Nov 2020 22:59:32 -0700 Subject: [PATCH 10/20] better delete race-condition handling, resize capacity for zvols --- src/driver/controller-zfs-ssh/index.js | 6 +- src/driver/freenas/index.js | 234 ++++++++++++++----------- 2 files changed, 136 insertions(+), 104 deletions(-) diff --git a/src/driver/controller-zfs-ssh/index.js b/src/driver/controller-zfs-ssh/index.js index c32d41d..66526f5 100644 --- a/src/driver/controller-zfs-ssh/index.js +++ b/src/driver/controller-zfs-ssh/index.js @@ -1012,7 +1012,11 @@ class ControllerZfsSshBaseDriver extends CsiBaseDriver { await this.expandVolume(call, datasetName); return { - capacity_bytes: this.options.zfs.datasetEnableQuotas ? capacity_bytes : 0, + capacity_bytes: + this.options.zfs.datasetEnableQuotas || + driverZfsResourceType == "volume" + ? capacity_bytes + : 0, node_expansion_required: driverZfsResourceType == "volume" ? true : false, }; } diff --git a/src/driver/freenas/index.js b/src/driver/freenas/index.js index c246a4f..cbf160b 100644 --- a/src/driver/freenas/index.js +++ b/src/driver/freenas/index.js @@ -1066,12 +1066,9 @@ class FreeNASDriver extends ControllerZfsSshBaseDriver { shareId = properties[FREENAS_NFS_SHARE_PROPERTY_NAME].value; - // remove nfs share - if ( - properties && - properties[FREENAS_NFS_SHARE_PROPERTY_NAME] && - properties[FREENAS_NFS_SHARE_PROPERTY_NAME].value != "-" - ) { + // only remove if the process has not succeeded already + if (zb.helpers.isPropertyValueSet(shareId)) { + // remove nfs share switch (apiVersion) { case 1: case 2: @@ -1113,6 +1110,13 @@ class FreeNASDriver extends ControllerZfsSshBaseDriver { } body: ${JSON.stringify(response.body)}` ); } + + // remove property to prevent delete race conditions + // due to id re-use by FreeNAS/TrueNAS + await zb.zfs.inherit( + datasetName, + FREENAS_NFS_SHARE_PROPERTY_NAME + ); } } break; @@ -1141,12 +1145,9 @@ class FreeNASDriver extends ControllerZfsSshBaseDriver { shareId = properties[FREENAS_SMB_SHARE_PROPERTY_NAME].value; - // remove smb share - if ( - properties && - properties[FREENAS_SMB_SHARE_PROPERTY_NAME] && - properties[FREENAS_SMB_SHARE_PROPERTY_NAME].value != "-" - ) { + // only remove if the process has not succeeded already + if (zb.helpers.isPropertyValueSet(shareId)) { + // remove smb share switch (apiVersion) { case 1: case 2: @@ -1191,6 +1192,13 @@ class FreeNASDriver extends ControllerZfsSshBaseDriver { } body: ${JSON.stringify(response.body)}` ); } + + // remove property to prevent delete race conditions + // due to id re-use by FreeNAS/TrueNAS + await zb.zfs.inherit( + datasetName, + FREENAS_SMB_SHARE_PROPERTY_NAME + ); } } break; @@ -1233,112 +1241,132 @@ class FreeNASDriver extends ControllerZfsSshBaseDriver { switch (apiVersion) { case 1: case 2: - // https://jira.ixsystems.com/browse/NAS-103952 + // only remove if the process has not succeeded already + if (zb.helpers.isPropertyValueSet(targetId)) { + // https://jira.ixsystems.com/browse/NAS-103952 - // v1 - /services/iscsi/target/{id}/ - // v2 - /iscsi/target/id/{id} - endpoint = ""; - if (apiVersion == 1) { - endpoint += "/services"; - } - endpoint += "/iscsi/target/"; - if (apiVersion == 2) { - endpoint += "id/"; - } - endpoint += targetId; - response = await httpClient.get(endpoint); - - // assume is gone for now - if ([404, 500].includes(response.statusCode)) { - } else { - deleteAsset = true; - assetName 
= null; - - // checking if set for backwards compatibility - if (zb.helpers.isPropertyValueSet(iscsiName)) { - switch (apiVersion) { - case 1: - assetName = response.body.iscsi_target_name; - break; - case 2: - assetName = response.body.name; - break; - } - - if (assetName != iscsiName) { - deleteAsset = false; - } + // v1 - /services/iscsi/target/{id}/ + // v2 - /iscsi/target/id/{id} + endpoint = ""; + if (apiVersion == 1) { + endpoint += "/services"; } + endpoint += "/iscsi/target/"; + if (apiVersion == 2) { + endpoint += "id/"; + } + endpoint += targetId; + response = await httpClient.get(endpoint); - if (deleteAsset) { - response = await httpClient.delete(endpoint); - if (![200, 204].includes(response.statusCode)) { - throw new GrpcError( - grpc.status.UNKNOWN, - `received error deleting iscsi target - extent: ${targetId} code: ${ - response.statusCode - } body: ${JSON.stringify(response.body)}` + // assume is gone for now + if ([404, 500].includes(response.statusCode)) { + } else { + deleteAsset = true; + assetName = null; + + // checking if set for backwards compatibility + if (zb.helpers.isPropertyValueSet(iscsiName)) { + switch (apiVersion) { + case 1: + assetName = response.body.iscsi_target_name; + break; + case 2: + assetName = response.body.name; + break; + } + + if (assetName != iscsiName) { + deleteAsset = false; + } + } + + if (deleteAsset) { + response = await httpClient.delete(endpoint); + if (![200, 204].includes(response.statusCode)) { + throw new GrpcError( + grpc.status.UNKNOWN, + `received error deleting iscsi target - extent: ${targetId} code: ${ + response.statusCode + } body: ${JSON.stringify(response.body)}` + ); + } + + // remove property to prevent delete race conditions + // due to id re-use by FreeNAS/TrueNAS + await zb.zfs.inherit( + datasetName, + FREENAS_ISCSI_TARGET_ID_PROPERTY_NAME + ); + } else { + this.ctx.logger.debug( + "not deleting iscsitarget asset as it appears ID %s has been re-used: zfs name - %s, iscsitarget name - %s", + targetId, + iscsiName, + assetName ); } + } + } + + // only remove if the process has not succeeded already + if (zb.helpers.isPropertyValueSet(extentId)) { + // v1 - /services/iscsi/targettoextent/{id}/ + // v2 - /iscsi/targetextent/id/{id} + if (apiVersion == 1) { + endpoint = "/services/iscsi/extent/"; } else { - this.ctx.logger.debug( - "not deleting iscsitarget asset as it appears ID %s has been re-used: zfs name - %s, iscsitarget name - %s", - targetId, - iscsiName, - assetName - ); + endpoint = "/iscsi/extent/id/"; } - } + endpoint += extentId; + response = await httpClient.get(endpoint); - // v1 - /services/iscsi/targettoextent/{id}/ - // v2 - /iscsi/targetextent/id/{id} - if (apiVersion == 1) { - endpoint = "/services/iscsi/extent/"; - } else { - endpoint = "/iscsi/extent/id/"; - } - endpoint += extentId; - response = await httpClient.get(endpoint); + // assume is gone for now + if ([404, 500].includes(response.statusCode)) { + } else { + deleteAsset = true; + assetName = null; - // assume is gone for now - if ([404, 500].includes(response.statusCode)) { - } else { - deleteAsset = true; - assetName = null; + // checking if set for backwards compatibility + if (zb.helpers.isPropertyValueSet(iscsiName)) { + switch (apiVersion) { + case 1: + assetName = response.body.iscsi_target_extent_name; + break; + case 2: + assetName = response.body.name; + break; + } - // checking if set for backwards compatibility - if (zb.helpers.isPropertyValueSet(iscsiName)) { - switch (apiVersion) { - case 1: - assetName = 
response.body.iscsi_target_extent_name; - break; - case 2: - assetName = response.body.name; - break; + if (assetName != iscsiName) { + deleteAsset = false; + } } - if (assetName != iscsiName) { - deleteAsset = false; - } - } + if (deleteAsset) { + response = await httpClient.delete(endpoint); + if (![200, 204].includes(response.statusCode)) { + throw new GrpcError( + grpc.status.UNKNOWN, + `received error deleting iscsi extent - extent: ${extentId} code: ${ + response.statusCode + } body: ${JSON.stringify(response.body)}` + ); + } - if (deleteAsset) { - response = await httpClient.delete(endpoint); - if (![200, 204].includes(response.statusCode)) { - throw new GrpcError( - grpc.status.UNKNOWN, - `received error deleting iscsi extent - extent: ${extentId} code: ${ - response.statusCode - } body: ${JSON.stringify(response.body)}` + // remove property to prevent delete race conditions + // due to id re-use by FreeNAS/TrueNAS + await zb.zfs.inherit( + datasetName, + FREENAS_ISCSI_EXTENT_ID_PROPERTY_NAME + ); + } else { + this.ctx.logger.debug( + "not deleting iscsiextent asset as it appears ID %s has been re-used: zfs name - %s, iscsiextent name - %s", + extentId, + iscsiName, + assetName ); } - } else { - this.ctx.logger.debug( - "not deleting iscsiextent asset as it appears ID %s has been re-used: zfs name - %s, iscsiextent name - %s", - extentId, - iscsiName, - assetName - ); } } break; From 4f48cfc640228fb2757298e61ad5178d39c4e5b2 Mon Sep 17 00:00:00 2001 From: Travis Glenn Hansen Date: Mon, 30 Nov 2020 00:32:13 -0700 Subject: [PATCH 11/20] fix missing await, errors on failure to reload iscsi service --- src/driver/freenas/index.js | 21 ++++++++++++++++++--- 1 file changed, 18 insertions(+), 3 deletions(-) diff --git a/src/driver/freenas/index.js b/src/driver/freenas/index.js index cbf160b..c9e982f 100644 --- a/src/driver/freenas/index.js +++ b/src/driver/freenas/index.js @@ -1388,20 +1388,35 @@ class FreeNASDriver extends ControllerZfsSshBaseDriver { async expandVolume(call, datasetName) { const driverShareType = this.getDriverShareType(); const sshClient = this.getSshClient(); + let response; switch (driverShareType) { case "iscsi": - const isScale = this.getIsScale(); + const isScale = await this.getIsScale(); if (isScale) { this.ctx.logger.verbose("FreeNAS reloading scst"); - await sshClient.exec( + response = await sshClient.exec( sshClient.buildCommand("systemctl", ["reload", "scst"]) ); + + if (response.code != 0) { + throw new GrpcError( + grpc.status.UNKNOWN, + `error reloading scst: ${JSON.stringify(response)}` + ); + } } else { this.ctx.logger.verbose("FreeNAS reloading ctld"); - await sshClient.exec( + response = await sshClient.exec( sshClient.buildCommand("/etc/rc.d/ctld", ["reload"]) ); + + if (response.code != 0) { + throw new GrpcError( + grpc.status.UNKNOWN, + `error reloading ctld: ${JSON.stringify(response)}` + ); + } } break; } From 3924f08ae873be83946c4a7083f238461353cb97 Mon Sep 17 00:00:00 2001 From: Travis Glenn Hansen Date: Mon, 30 Nov 2020 09:42:58 -0700 Subject: [PATCH 12/20] sudo support for reloading iscsi daemon, error handling for failed chown/chmod/setacl --- src/driver/controller-zfs-ssh/index.js | 20 ++++++++++++++++ src/driver/freenas/index.js | 33 ++++++++++++++------------ 2 files changed, 38 insertions(+), 15 deletions(-) diff --git a/src/driver/controller-zfs-ssh/index.js b/src/driver/controller-zfs-ssh/index.js index 66526f5..9da45dc 100644 --- a/src/driver/controller-zfs-ssh/index.js +++ b/src/driver/controller-zfs-ssh/index.js @@ -699,6 +699,14 
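Stripped of the API-version branching, the delete pattern applied above to NFS/SMB shares and iSCSI targets/extents reduces to the sketch below; zb and httpClient are the Zetabyte and FreeNAS HTTP clients used throughout the driver, idProperty/endpoint are illustrative placeholders, and the name-verification guard against id re-use is omitted for brevity.

// Remove a FreeNAS resource exactly once: skip if the tracking property was
// already cleared, delete the asset, then `zfs inherit` the property so a
// retried DeleteVolume cannot act on an id FreeNAS/TrueNAS has since re-used.
async function deleteAssetOnce(zb, httpClient, datasetName, idProperty, endpoint, assetId) {
  if (!zb.helpers.isPropertyValueSet(assetId)) {
    return; // a previous attempt already completed the cleanup
  }

  const response = await httpClient.delete(endpoint);
  if (![200, 204].includes(response.statusCode)) {
    throw new Error(
      `received error deleting asset: ${response.statusCode} body: ${JSON.stringify(response.body)}`
    );
  }

  // clear the property to prevent delete race conditions due to id re-use
  await zb.zfs.inherit(datasetName, idProperty);
}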
@@ class ControllerZfsSshBaseDriver extends CsiBaseDriver { driver.ctx.logger.verbose("set permission command: %s", command); response = await sshClient.exec(command); + if (response.code != 0) { + throw new GrpcError( + grpc.status.UNKNOWN, + `error setting permissions on dataset: ${JSON.stringify( + response + )}` + ); + } } // set ownership @@ -722,6 +730,12 @@ class ControllerZfsSshBaseDriver extends CsiBaseDriver { driver.ctx.logger.verbose("set ownership command: %s", command); response = await sshClient.exec(command); + if (response.code != 0) { + throw new GrpcError( + grpc.status.UNKNOWN, + `error setting ownership on dataset: ${JSON.stringify(response)}` + ); + } } // set acls @@ -739,6 +753,12 @@ class ControllerZfsSshBaseDriver extends CsiBaseDriver { driver.ctx.logger.verbose("set acl command: %s", command); response = await sshClient.exec(command); + if (response.code != 0) { + throw new GrpcError( + grpc.status.UNKNOWN, + `error setting acl on dataset: ${JSON.stringify(response)}` + ); + } } } diff --git a/src/driver/freenas/index.js b/src/driver/freenas/index.js index c9e982f..3adca8b 100644 --- a/src/driver/freenas/index.js +++ b/src/driver/freenas/index.js @@ -1393,28 +1393,31 @@ class FreeNASDriver extends ControllerZfsSshBaseDriver { switch (driverShareType) { case "iscsi": const isScale = await this.getIsScale(); + let command; + let reload = false; if (isScale) { - this.ctx.logger.verbose("FreeNAS reloading scst"); - response = await sshClient.exec( - sshClient.buildCommand("systemctl", ["reload", "scst"]) - ); - - if (response.code != 0) { - throw new GrpcError( - grpc.status.UNKNOWN, - `error reloading scst: ${JSON.stringify(response)}` - ); - } + command = sshClient.buildCommand("systemctl", ["reload", "scst"]); + reload = true; } else { - this.ctx.logger.verbose("FreeNAS reloading ctld"); - response = await sshClient.exec( - sshClient.buildCommand("/etc/rc.d/ctld", ["reload"]) + command = sshClient.buildCommand("/etc/rc.d/ctld", ["reload"]); + reload = true; + } + + if (reload) { + if (this.getSudoEnabled()) { + command = (await this.getSudoPath()) + " " + command; + } + + this.ctx.logger.verbose( + "FreeNAS reloading iscsi daemon: %s", + command ); + response = await sshClient.exec(command); if (response.code != 0) { throw new GrpcError( grpc.status.UNKNOWN, - `error reloading ctld: ${JSON.stringify(response)}` + `error reloading iscsi daemon: ${JSON.stringify(response)}` ); } } From 00dcf0b60ab6b7b6be6efaa35c0ed45ca138a993 Mon Sep 17 00:00:00 2001 From: Travis Glenn Hansen Date: Mon, 30 Nov 2020 16:19:04 -0700 Subject: [PATCH 13/20] better duplicate iscsi targettoextent detection --- package-lock.json | 227 +++++++++++++----------------------- package.json | 6 +- src/driver/freenas/index.js | 23 ++-- 3 files changed, 98 insertions(+), 158 deletions(-) diff --git a/package-lock.json b/package-lock.json index 408f015..1e71a0c 100644 --- a/package-lock.json +++ b/package-lock.json @@ -50,9 +50,9 @@ } }, "@eslint/eslintrc": { - "version": "0.1.3", - "resolved": "https://registry.npmjs.org/@eslint/eslintrc/-/eslintrc-0.1.3.tgz", - "integrity": "sha512-4YVwPkANLeNtRjMekzux1ci8hIaH5eGKktGqR0d3LWsKNn5B2X/1Z6Trxy7jQXl9EBGE6Yj02O+t09FMeRllaA==", + "version": "0.2.1", + "resolved": "https://registry.npmjs.org/@eslint/eslintrc/-/eslintrc-0.2.1.tgz", + "integrity": "sha512-XRUeBZ5zBWLYgSANMpThFddrZZkEbGHgUdt5UJjZfnlN9BGCiUBrf+nvbRupSjMvqzwnQN0qwCmOxITt1cfywA==", "requires": { "ajv": "^6.12.4", "debug": "^4.1.1", @@ -67,9 +67,9 @@ }, "dependencies": { "ajv": { - "version": 
"6.12.4", - "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.4.tgz", - "integrity": "sha512-eienB2c9qVQs2KWexhkrdMLVDoIQCz5KSeLxwg9Lzk4DOfBtIK9PQwwufcsn1jjGuf9WZmqPMbGxOzfcuphJCQ==", + "version": "6.12.6", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", + "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==", "requires": { "fast-deep-equal": "^3.1.1", "fast-json-stable-stringify": "^2.0.0", @@ -169,11 +169,6 @@ "resolved": "https://registry.npmjs.org/@protobufjs/utf8/-/utf8-1.1.0.tgz", "integrity": "sha1-p3c2C1s5oaLlEG+OhY8v0tBgxXA=" }, - "@types/color-name": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/@types/color-name/-/color-name-1.1.1.tgz", - "integrity": "sha512-rr+OQyAjxze7GgWrSaJwydHStIhHq2lvY3BOC2Mj7KnzI7XK0Uw1TOOdI9lDoajEbSWLiYgoo4f1R51erQfhPQ==" - }, "@types/long": { "version": "4.0.1", "resolved": "https://registry.npmjs.org/@types/long/-/long-4.0.1.tgz", @@ -185,14 +180,14 @@ "integrity": "sha512-dJ9vXxJ8MEwzNn4GkoAGauejhXoKuJyYKegsA6Af25ZpEDXomeVXt5HUWUNVHk5UN7+U0f6ghC6otwt+7PdSDg==" }, "acorn": { - "version": "7.4.0", - "resolved": "https://registry.npmjs.org/acorn/-/acorn-7.4.0.tgz", - "integrity": "sha512-+G7P8jJmCHr+S+cLfQxygbWhXy+8YTVGzAkpEbcLo2mLoL7tij/VG41QSHACSf5QgYRhMZYHuNc6drJaO0Da+w==" + "version": "7.4.1", + "resolved": "https://registry.npmjs.org/acorn/-/acorn-7.4.1.tgz", + "integrity": "sha512-nQyp0o1/mNdbTO1PO6kHkwSrmgZ0MT/jCCpNiwbUjGoRN4dlBhqJtoQuCnEOKzgTVwg0ZWiCoQy6SxMebQVh8A==" }, "acorn-jsx": { - "version": "5.2.0", - "resolved": "https://registry.npmjs.org/acorn-jsx/-/acorn-jsx-5.2.0.tgz", - "integrity": "sha512-HiUX/+K2YpkpJ+SzBffkM/AQ2YE03S0U1kjTLVpoJdhZMOWy8qvXVN9JdLqv2QsaQ6MPYQIuNmwD8zOiYUofLQ==" + "version": "5.3.1", + "resolved": "https://registry.npmjs.org/acorn-jsx/-/acorn-jsx-5.3.1.tgz", + "integrity": "sha512-K0Ptm/47OKfQRpNQ2J/oIN/3QYiK6FwW+eJbILhsdxh2WTLdl+30o8aGdTbm5JbffpFFAg/g+zi1E+jvJha5ng==" }, "ajv": { "version": "6.12.3", @@ -324,11 +319,6 @@ "resolved": "https://registry.npmjs.org/callsites/-/callsites-3.1.0.tgz", "integrity": "sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ==" }, - "camelcase": { - "version": "5.3.1", - "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-5.3.1.tgz", - "integrity": "sha512-L28STB170nwWS63UjtlEOE3dldQApaJXZkOI1uMFfzf3rRuPegHaHesyee+YxQ+W6SvRDQV6UrdOdRiR153wJg==" - }, "caseless": { "version": "0.12.0", "resolved": "https://registry.npmjs.org/caseless/-/caseless-0.12.0.tgz", @@ -344,11 +334,10 @@ }, "dependencies": { "ansi-styles": { - "version": "4.2.1", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.2.1.tgz", - "integrity": "sha512-9VGjrMsG1vePxcSweQsN20KY/c4zN0h9fLjqAbwbPfahM3t+NL+M9HC8xeXG2I8pX5NoamTGNuomEUFI7fcUjA==", + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", "requires": { - "@types/color-name": "^1.1.1", "color-convert": "^2.0.1" } }, @@ -381,13 +370,13 @@ } }, "cliui": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/cliui/-/cliui-6.0.0.tgz", - "integrity": "sha512-t6wbgtoCXvAzst7QgXxJYqPt0usEfbgQdftEPbLL/cvv6HPE5VgvqCuAIDR0NgU52ds6rFwqrgakNLrHEjCbrQ==", + "version": "7.0.4", + "resolved": "https://registry.npmjs.org/cliui/-/cliui-7.0.4.tgz", + "integrity": 
"sha512-OcRE68cOsVMXp1Yvonl/fzkQOyjLSu/8bhPDfQt0e0/Eb283TKP20Fs2MqoPsr9SwA595rRCA+QMzYc9nBP+JQ==", "requires": { "string-width": "^4.2.0", "strip-ansi": "^6.0.0", - "wrap-ansi": "^6.2.0" + "wrap-ansi": "^7.0.0" }, "dependencies": { "emoji-regex": { @@ -504,11 +493,11 @@ } }, "debug": { - "version": "4.1.1", - "resolved": "https://registry.npmjs.org/debug/-/debug-4.1.1.tgz", - "integrity": "sha512-pYAIzeRo8J6KPEaJ0VWOh5Pzkbw/RetuzehGM7QRRX5he4fPHx2rdKMB256ehJCkX+XRQm16eZLqLNS8RSZXZw==", + "version": "4.3.1", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.1.tgz", + "integrity": "sha512-doEwdvm4PCeK4K3RQN2ZC2BYUBaxwLARCqZmMjtF8a51J2Rb0xpVloFRnCODwqjpwnAoao4pelN8l3RJdv3gRQ==", "requires": { - "ms": "^2.1.1" + "ms": "2.1.2" } }, "decamelize": { @@ -570,27 +559,32 @@ "ansi-colors": "^4.1.1" } }, + "escalade": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.1.1.tgz", + "integrity": "sha512-k0er2gUkLf8O0zKJiAhmkTnJlTvINGv7ygDNPbeIsX/TJjGJZHuh9B2UxbsaEkmlEo9MfhrSzmhIlhRlI2GXnw==" + }, "escape-string-regexp": { "version": "1.0.5", "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz", "integrity": "sha1-G2HAViGQqN/2rjuyzwIAyhMLhtQ=" }, "eslint": { - "version": "7.8.1", - "resolved": "https://registry.npmjs.org/eslint/-/eslint-7.8.1.tgz", - "integrity": "sha512-/2rX2pfhyUG0y+A123d0ccXtMm7DV7sH1m3lk9nk2DZ2LReq39FXHueR9xZwshE5MdfSf0xunSaMWRqyIA6M1w==", + "version": "7.14.0", + "resolved": "https://registry.npmjs.org/eslint/-/eslint-7.14.0.tgz", + "integrity": "sha512-5YubdnPXrlrYAFCKybPuHIAH++PINe1pmKNc5wQRB9HSbqIK1ywAnntE3Wwua4giKu0bjligf1gLF6qxMGOYRA==", "requires": { "@babel/code-frame": "^7.0.0", - "@eslint/eslintrc": "^0.1.3", + "@eslint/eslintrc": "^0.2.1", "ajv": "^6.10.0", "chalk": "^4.0.0", "cross-spawn": "^7.0.2", "debug": "^4.0.1", "doctrine": "^3.0.0", "enquirer": "^2.3.5", - "eslint-scope": "^5.1.0", + "eslint-scope": "^5.1.1", "eslint-utils": "^2.1.0", - "eslint-visitor-keys": "^1.3.0", + "eslint-visitor-keys": "^2.0.0", "espree": "^7.3.0", "esquery": "^1.2.0", "esutils": "^2.0.2", @@ -620,11 +614,11 @@ } }, "eslint-scope": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-5.1.0.tgz", - "integrity": "sha512-iiGRvtxWqgtx5m8EyQUJihBloE4EnYeGE/bz1wSPwJE6tZuJUtHlhqDM4Xj2ukE8Dyy1+HCZ4hE0fzIVMzb58w==", + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-5.1.1.tgz", + "integrity": "sha512-2NxwbF/hZ0KpepYN0cNbo+FN6XoK7GaHlQhgx/hIZl6Va0bF45RQOOwhLIy8lQDbuCiadSLCBnH2CFYquit5bw==", "requires": { - "esrecurse": "^4.1.0", + "esrecurse": "^4.3.0", "estraverse": "^4.1.1" } }, @@ -634,12 +628,19 @@ "integrity": "sha512-w94dQYoauyvlDc43XnGB8lU3Zt713vNChgt4EWwhXAP2XkBvndfxF0AgIqKOOasjPIPzj9JqgwkwbCYD0/V3Zg==", "requires": { "eslint-visitor-keys": "^1.1.0" + }, + "dependencies": { + "eslint-visitor-keys": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-1.3.0.tgz", + "integrity": "sha512-6J72N8UNa462wa/KFODt/PJ3IU60SDpC3QXC1Hjc1BXXpfL2C9R5+AU7jhe0F6GREqVMh4Juu+NY7xn+6dipUQ==" + } } }, "eslint-visitor-keys": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-1.3.0.tgz", - "integrity": "sha512-6J72N8UNa462wa/KFODt/PJ3IU60SDpC3QXC1Hjc1BXXpfL2C9R5+AU7jhe0F6GREqVMh4Juu+NY7xn+6dipUQ==" + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-2.0.0.tgz", + "integrity": 
"sha512-QudtT6av5WXels9WjIM7qz1XD1cWGvX4gGXvp/zBn9nXG02D0utdU3Em2m/QjTnrsk6bBjmCygl3rmj118msQQ==" }, "espree": { "version": "7.3.0", @@ -649,6 +650,13 @@ "acorn": "^7.4.0", "acorn-jsx": "^5.2.0", "eslint-visitor-keys": "^1.3.0" + }, + "dependencies": { + "eslint-visitor-keys": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-1.3.0.tgz", + "integrity": "sha512-6J72N8UNa462wa/KFODt/PJ3IU60SDpC3QXC1Hjc1BXXpfL2C9R5+AU7jhe0F6GREqVMh4Juu+NY7xn+6dipUQ==" + } } }, "esprima": { @@ -739,15 +747,6 @@ "flat-cache": "^2.0.1" } }, - "find-up": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/find-up/-/find-up-4.1.0.tgz", - "integrity": "sha512-PpOwAdQ/YlXQ2vj8a3h8IipDuYRi3wceVQQGYWxNINccq40Anw7BlsEXCMbt1Zt+OLA6Fq9suIpIWD0OsnISlw==", - "requires": { - "locate-path": "^5.0.0", - "path-exists": "^4.0.0" - } - }, "flat-cache": { "version": "2.0.1", "resolved": "https://registry.npmjs.org/flat-cache/-/flat-cache-2.0.1.tgz", @@ -903,9 +902,9 @@ "integrity": "sha512-cyFDKrqc/YdcWFniJhzI42+AzS+gNwmUzOSFcRCQYwySuBBBy/KjuxWLZ/FHEH6Moq1NizMOBWyTcv8O4OZIMg==" }, "import-fresh": { - "version": "3.2.1", - "resolved": "https://registry.npmjs.org/import-fresh/-/import-fresh-3.2.1.tgz", - "integrity": "sha512-6e1q1cnWP2RXD9/keSkxHScg508CdXqXWgWBaETNhyuBFz+kUZlKboh+ISK+bU++DmbHimVBrOz/zzPe0sZ3sQ==", + "version": "3.2.2", + "resolved": "https://registry.npmjs.org/import-fresh/-/import-fresh-3.2.2.tgz", + "integrity": "sha512-cTPNrlvJT6twpYy+YmKUKrTSjWFs3bjYjAhCwm+z4EOCubZxAuO+hHpRN64TqjEaYSHs7tJAE0w1CKMGmsG/lw==", "requires": { "parent-module": "^1.0.0", "resolve-from": "^4.0.0" @@ -1055,14 +1054,6 @@ "type-check": "~0.4.0" } }, - "locate-path": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-5.0.0.tgz", - "integrity": "sha512-t7hw9pI+WvuwNJXwk5zVHpyhIqzg2qTlklJOf0mVxGSbe3Fp2VieZcduNYjaLDoy6p9uGpQEGWG87WpMKlNq8g==", - "requires": { - "p-locate": "^4.1.0" - } - }, "lodash": { "version": "4.17.20", "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.20.tgz", @@ -1232,27 +1223,6 @@ "lcid": "^1.0.0" } }, - "p-limit": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-2.3.0.tgz", - "integrity": "sha512-//88mFWSJx8lxCzwdAABTJL2MyWB12+eIY7MDL2SqLmAkeKU9qxRvWuSyTjm3FUmpBEMuFfckAIqEaVGUDxb6w==", - "requires": { - "p-try": "^2.0.0" - } - }, - "p-locate": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-4.1.0.tgz", - "integrity": "sha512-R79ZZ/0wAxKGu3oYMlz8jy/kbhsNrS7SKZ7PxEHBgJ5+F2mtFW2fK2cOtBh1cHYkQsbzFV7I+EoRKe6Yt0oK7A==", - "requires": { - "p-limit": "^2.2.0" - } - }, - "p-try": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/p-try/-/p-try-2.2.0.tgz", - "integrity": "sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ==" - }, "parent-module": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/parent-module/-/parent-module-1.0.1.tgz", @@ -1261,11 +1231,6 @@ "callsites": "^3.0.0" } }, - "path-exists": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-4.0.0.tgz", - "integrity": "sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==" - }, "path-is-absolute": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz", @@ -1452,11 +1417,6 @@ "resolved": "https://registry.npmjs.org/require-directory/-/require-directory-2.1.1.tgz", 
"integrity": "sha1-jGStX9MNqxyXbiNE/+f3kqam30I=" }, - "require-main-filename": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/require-main-filename/-/require-main-filename-2.0.0.tgz", - "integrity": "sha512-NKN5kMDylKuldxYLSUfrbo5Tuzh4hd+2E8NPPX02mZtn1VuREQToYe/ZdlJy+J3uCpfaiGF05e7B8W0iXbQHmg==" - }, "resolve-from": { "version": "4.0.0", "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-4.0.0.tgz", @@ -1507,11 +1467,6 @@ "resolved": "https://registry.npmjs.org/semver/-/semver-7.3.2.tgz", "integrity": "sha512-OrOb32TeeambH6UrhtShmF7CRDqhL6/5XpPNp2DuRH6+9QLw/orhp72j87v8Qa1ScDkvrrBNpZcDejAirJmfXQ==" }, - "set-blocking": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/set-blocking/-/set-blocking-2.0.0.tgz", - "integrity": "sha1-BF+XgtARrppoA93TgrJDkrPYkPc=" - }, "shebang-command": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz", @@ -1732,14 +1687,14 @@ "integrity": "sha1-RQ1Nyfpw3nMnYvvS1KKJgUGaDM8=" }, "uuid": { - "version": "8.3.0", - "resolved": "https://registry.npmjs.org/uuid/-/uuid-8.3.0.tgz", - "integrity": "sha512-fX6Z5o4m6XsXBdli9g7DtWgAx+osMsRRZFKma1mIUsLCz6vRvv+pz5VNbyu9UEDzpMWulZfvpgb/cmDXVulYFQ==" + "version": "8.3.1", + "resolved": "https://registry.npmjs.org/uuid/-/uuid-8.3.1.tgz", + "integrity": "sha512-FOmRr+FmWEIG8uhZv6C2bTgEVXsHk08kE7mPlrBbEe+c3r9pjceVPgupIfNIhc4yx55H69OXANrUaSuu9eInKg==" }, "v8-compile-cache": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/v8-compile-cache/-/v8-compile-cache-2.1.1.tgz", - "integrity": "sha512-8OQ9CL+VWyt3JStj7HX7/ciTL2V3Rl1Wf5OL+SNTm0yK1KvtReVulksyeRnCANHHuUxHlQig+JJDlUhBt1NQDQ==" + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/v8-compile-cache/-/v8-compile-cache-2.2.0.tgz", + "integrity": "sha512-gTpR5XQNKFwOd4clxfnhaqvfqMpqEwr4tOtCyz4MtYZX2JYhfr1JvBFKdS+7K/9rfpZR3VLX+YWBbKoxCgS43Q==" }, "verror": { "version": "1.10.0", @@ -1759,11 +1714,6 @@ "isexe": "^2.0.0" } }, - "which-module": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/which-module/-/which-module-2.0.0.tgz", - "integrity": "sha1-2e8H3Od7mQK4o6j6SzHD4/fm6Ho=" - }, "window-size": { "version": "0.1.4", "resolved": "https://registry.npmjs.org/window-size/-/window-size-0.1.4.tgz", @@ -1834,9 +1784,9 @@ "integrity": "sha1-J1hIEIkUVqQXHI0CJkQa3pDLyus=" }, "wrap-ansi": { - "version": "6.2.0", - "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-6.2.0.tgz", - "integrity": "sha512-r6lPcBGxZXlIcymEu7InxDMhdW0KDxpLgoFLcguasxCaJ/SOIZwINatK9KY/tf+ZrlywOKU0UDj3ATXUBfxJXA==", + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz", + "integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==", "requires": { "ansi-styles": "^4.0.0", "string-width": "^4.1.0", @@ -1844,11 +1794,10 @@ }, "dependencies": { "ansi-styles": { - "version": "4.2.1", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.2.1.tgz", - "integrity": "sha512-9VGjrMsG1vePxcSweQsN20KY/c4zN0h9fLjqAbwbPfahM3t+NL+M9HC8xeXG2I8pX5NoamTGNuomEUFI7fcUjA==", + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", "requires": { - "@types/color-name": "^1.1.1", "color-convert": "^2.0.1" } }, @@ -1901,9 +1850,9 @@ } }, "y18n": { - "version": "4.0.0", - "resolved": 
"https://registry.npmjs.org/y18n/-/y18n-4.0.0.tgz", - "integrity": "sha512-r9S/ZyXu/Xu9q1tYlpsLIsa3EeLXXk0VwlxqTcFRfg9EhMW+17kbt9G0NrgCmhGb5vT2hyhJZLfDGx+7+5Uj/w==" + "version": "5.0.5", + "resolved": "https://registry.npmjs.org/y18n/-/y18n-5.0.5.tgz", + "integrity": "sha512-hsRUr4FFrvhhRH12wOdfs38Gy7k2FFzB9qgN9v3aLykRq0dRcdcpz5C9FxdS2NuhOrI/628b/KSTJ3rwHysYSg==" }, "yallist": { "version": "4.0.0", @@ -1911,21 +1860,17 @@ "integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==" }, "yargs": { - "version": "15.4.1", - "resolved": "https://registry.npmjs.org/yargs/-/yargs-15.4.1.tgz", - "integrity": "sha512-aePbxDmcYW++PaqBsJ+HYUFwCdv4LVvdnhBy78E57PIor8/OVvhMrADFFEDh8DHDFRv/O9i3lPhsENjO7QX0+A==", + "version": "16.1.1", + "resolved": "https://registry.npmjs.org/yargs/-/yargs-16.1.1.tgz", + "integrity": "sha512-hAD1RcFP/wfgfxgMVswPE+z3tlPFtxG8/yWUrG2i17sTWGCGqWnxKcLTF4cUKDUK8fzokwsmO9H0TDkRbMHy8w==", "requires": { - "cliui": "^6.0.0", - "decamelize": "^1.2.0", - "find-up": "^4.1.0", - "get-caller-file": "^2.0.1", + "cliui": "^7.0.2", + "escalade": "^3.1.1", + "get-caller-file": "^2.0.5", "require-directory": "^2.1.1", - "require-main-filename": "^2.0.0", - "set-blocking": "^2.0.0", "string-width": "^4.2.0", - "which-module": "^2.0.0", - "y18n": "^4.0.0", - "yargs-parser": "^18.1.2" + "y18n": "^5.0.5", + "yargs-parser": "^20.2.2" }, "dependencies": { "emoji-regex": { @@ -1951,13 +1896,9 @@ } }, "yargs-parser": { - "version": "18.1.3", - "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-18.1.3.tgz", - "integrity": "sha512-o50j0JeToy/4K6OZcaQmW6lyXXKhq7csREXcDwk2omFPJEwUNOVtJKvmDr9EI1fAJZUyZcRF7kxGBWmRXudrCQ==", - "requires": { - "camelcase": "^5.0.0", - "decamelize": "^1.2.0" - } + "version": "20.2.4", + "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-20.2.4.tgz", + "integrity": "sha512-WOkpgNhPTlE73h4VFAFsOnomJVaovO8VqLDzy5saChRBFQFBoMYirowyW+Q9HB4HFF4Z7VZTiG3iSzJJA29yRA==" } } } diff --git a/package.json b/package.json index cfbcf76..82acc4e 100644 --- a/package.json +++ b/package.json @@ -20,7 +20,7 @@ "dependencies": { "@grpc/proto-loader": "^0.5.5", "bunyan": "^1.8.14", - "eslint": "^7.8.1", + "eslint": "^7.14.0", "grpc-uds": "^0.1.4", "handlebars": "^4.7.6", "js-yaml": "^3.14.0", @@ -28,8 +28,8 @@ "request": "^2.88.2", "ssh2": "^0.8.9", "uri-js": "^4.4.0", - "uuid": "^8.3.0", + "uuid": "^8.3.1", "winston": "^3.3.3", - "yargs": "^15.4.1" + "yargs": "^16.1.1" } } diff --git a/src/driver/freenas/index.js b/src/driver/freenas/index.js index 3adca8b..cef7035 100644 --- a/src/driver/freenas/index.js +++ b/src/driver/freenas/index.js @@ -746,12 +746,12 @@ class FreeNASDriver extends ControllerZfsSshBaseDriver { // Extent is already in this target. if ( response.statusCode == 409 && - JSON.stringify(response.body).includes( + (JSON.stringify(response.body).includes( "Extent is already in this target." - ) && - JSON.stringify(response.body).includes( - "LUN ID is already being used for this target." - ) + ) || + JSON.stringify(response.body).includes( + "LUN ID is already being used for this target." + )) ) { targetToExtent = await this.findResourceByProperties( "/services/iscsi/targettoextent", @@ -941,12 +941,12 @@ class FreeNASDriver extends ControllerZfsSshBaseDriver { // Extent is already in this target. if ( response.statusCode == 422 && - JSON.stringify(response.body).includes( + (JSON.stringify(response.body).includes( "Extent is already in this target." 
- ) && - JSON.stringify(response.body).includes( - "LUN ID is already being used for this target." - ) + ) || + JSON.stringify(response.body).includes( + "LUN ID is already being used for this target." + )) ) { targetToExtent = await this.findResourceByProperties( "/iscsi/targetextent", @@ -1388,7 +1388,6 @@ class FreeNASDriver extends ControllerZfsSshBaseDriver { async expandVolume(call, datasetName) { const driverShareType = this.getDriverShareType(); const sshClient = this.getSshClient(); - let response; switch (driverShareType) { case "iscsi": @@ -1413,7 +1412,7 @@ class FreeNASDriver extends ControllerZfsSshBaseDriver { command ); - response = await sshClient.exec(command); + let response = await sshClient.exec(command); if (response.code != 0) { throw new GrpcError( grpc.status.UNKNOWN, From 455c0f8b3fdd3e2a541616b1074b11f0c3c08e08 Mon Sep 17 00:00:00 2001 From: Travis Glenn Hansen Date: Mon, 30 Nov 2020 23:17:35 -0700 Subject: [PATCH 14/20] crazy create race condition logic --- src/driver/freenas/index.js | 91 +++++++++++++++++++++++++++++++++---- 1 file changed, 82 insertions(+), 9 deletions(-) diff --git a/src/driver/freenas/index.js b/src/driver/freenas/index.js index cef7035..224d58a 100644 --- a/src/driver/freenas/index.js +++ b/src/driver/freenas/index.js @@ -79,18 +79,25 @@ class FreeNASDriver extends ControllerZfsSshBaseDriver { } async findResourceByProperties(endpoint, match) { - if (!match || Object.keys(match).length < 1) { + if (!match) { return; } + + if (typeof match === "object" && Object.keys(match).length < 1) { + return; + } + const httpClient = await this.getHttpClient(); let target; let page = 0; + let lastReponse; // loop and find target let queryParams = {}; // TODO: relax this using getSystemVersion perhaps // https://jira.ixsystems.com/browse/NAS-103916 - if (httpClient.getApiVersion() == 1) { + // NOTE: if using apiVersion 2 with 11.2 you will have issues + if (httpClient.getApiVersion() == 1 || httpClient.getApiVersion() == 2) { queryParams.limit = 100; queryParams.offset = 0; } @@ -102,7 +109,14 @@ class FreeNASDriver extends ControllerZfsSshBaseDriver { queryParams.offset = queryParams.limit * page; } + // crude stoppage attempt let response = await httpClient.get(endpoint, queryParams); + if (lastReponse) { + if (JSON.stringify(lastReponse) == JSON.stringify(response)) { + break; + } + } + lastReponse = response; if (response.statusCode == 200) { if (response.body.length < 1) { @@ -110,10 +124,15 @@ class FreeNASDriver extends ControllerZfsSshBaseDriver { } response.body.some((i) => { let isMatch = true; - for (let property in match) { - if (match[property] != i[property]) { - isMatch = false; - break; + + if (typeof match === "function") { + isMatch = match(i); + } else { + for (let property in match) { + if (match[property] != i[property]) { + isMatch = false; + break; + } } } @@ -217,6 +236,26 @@ class FreeNASDriver extends ControllerZfsSshBaseDriver { * v2 = 200 */ if ([200, 201].includes(response.statusCode)) { + let sharePaths; + switch (apiVersion) { + case 1: + sharePaths = response.body.nfs_paths; + break; + case 2: + sharePaths = response.body.paths; + break; + } + + // FreeNAS responding with bad data + if (!sharePaths.includes(properties.mountpoint.value)) { + throw new GrpcError( + grpc.status.UNKNOWN, + `FreeNAS responded with incorrect share data: ${ + response.statusCode + } body: ${JSON.stringify(response.body)}` + ); + } + //set zfs property await zb.zfs.set(datasetName, { [FREENAS_NFS_SHARE_PROPERTY_NAME]: response.body.id, @@ -228,11 
+267,41 @@ class FreeNASDriver extends ControllerZfsSshBaseDriver { */ if ( [409, 422].includes(response.statusCode) && - JSON.stringify(response.body).includes( + (JSON.stringify(response.body).includes( "You can't share same filesystem with all hosts twice." - ) + ) || + JSON.stringify(response.body).includes( + "Another NFS share already exports this dataset for some network" + )) ) { - // move along + let lookupShare = await this.findResourceByProperties( + "/sharing/nfs", + (item) => { + if ( + (item.nfs_paths && + item.nfs_paths.includes( + properties.mountpoint.value + )) || + (item.paths && + item.paths.includes(properties.mountpoint.value)) + ) { + return true; + } + return false; + } + ); + + if (!lookupShare) { + throw new GrpcError( + grpc.status.UNKNOWN, + `FreeNAS failed to find matching share` + ); + } + + //set zfs property + await zb.zfs.set(datasetName, { + [FREENAS_NFS_SHARE_PROPERTY_NAME]: lookupShare.id, + }); } else { throw new GrpcError( grpc.status.UNKNOWN, @@ -406,6 +475,7 @@ class FreeNASDriver extends ControllerZfsSshBaseDriver { ) ) { // move along + // TODO: need to set the shareId here for sure } else { throw new GrpcError( grpc.status.UNKNOWN, @@ -1428,6 +1498,9 @@ class FreeNASDriver extends ControllerZfsSshBaseDriver { const systemVersion = await this.getSystemVersion(); if (systemVersion.v2) { + if ((await this.getSystemVersionMajorMinor()) == 11.2) { + return 1; + } return 2; } From 2a6590335cd386fd6e85acf79cb496706b125eb8 Mon Sep 17 00:00:00 2001 From: Travis Glenn Hansen Date: Mon, 30 Nov 2020 23:19:11 -0700 Subject: [PATCH 15/20] noninteractive frontend for Dockerfile --- Dockerfile | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index faa14b5..3972079 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,7 +1,8 @@ FROM debian:10-slim AS build - #FROM --platform=$BUILDPLATFORM debian:10-slim AS build +ENV DEBIAN_FRONTEND=noninteractive + ARG TARGETPLATFORM ARG BUILDPLATFORM @@ -41,6 +42,8 @@ RUN rm -rf docker ###################### FROM debian:10-slim +ENV DEBIAN_FRONTEND=noninteractive + ARG TARGETPLATFORM ARG BUILDPLATFORM From 716cde8f299d3b44e4b470b65388abf8e4294388 Mon Sep 17 00:00:00 2001 From: Travis Glenn Hansen Date: Tue, 1 Dec 2020 00:04:29 -0700 Subject: [PATCH 16/20] more create race condition logic for iscsi --- src/driver/freenas/index.js | 30 ++++++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) diff --git a/src/driver/freenas/index.js b/src/driver/freenas/index.js index 224d58a..813a3c4 100644 --- a/src/driver/freenas/index.js +++ b/src/driver/freenas/index.js @@ -655,6 +655,13 @@ class FreeNASDriver extends ControllerZfsSshBaseDriver { ); } + if (target.iscsi_target_name != iscsiName) { + throw new GrpcError( + grpc.status.UNKNOWN, + `mismatch name error creating iscsi target` + ); + } + this.ctx.logger.verbose("FreeNAS ISCSI TARGET: %j", target); // set target.id on zvol @@ -791,6 +798,14 @@ class FreeNASDriver extends ControllerZfsSshBaseDriver { `unknown error creating iscsi extent` ); } + + if (extent.iscsi_target_extent_name != iscsiName) { + throw new GrpcError( + grpc.status.UNKNOWN, + `mismatch name error creating iscsi extent` + ); + } + this.ctx.logger.verbose("FreeNAS ISCSI EXTENT: %j", extent); await zb.zfs.set(datasetName, { @@ -933,6 +948,13 @@ class FreeNASDriver extends ControllerZfsSshBaseDriver { ); } + if (target.name != iscsiName) { + throw new GrpcError( + grpc.status.UNKNOWN, + `mismatch name error creating iscsi target` + ); + } + this.ctx.logger.verbose("FreeNAS 
ISCSI TARGET: %j", target); // set target.id on zvol @@ -987,6 +1009,14 @@ class FreeNASDriver extends ControllerZfsSshBaseDriver { `unknown error creating iscsi extent` ); } + + if (extent.name != iscsiName) { + throw new GrpcError( + grpc.status.UNKNOWN, + `mismatch name error creating iscsi extent` + ); + } + this.ctx.logger.verbose("FreeNAS ISCSI EXTENT: %j", extent); await zb.zfs.set(datasetName, { From 01113c827039ee8820df432caf11900ec3b9be9a Mon Sep 17 00:00:00 2001 From: Travis Glenn Hansen Date: Tue, 1 Dec 2020 16:52:59 -0700 Subject: [PATCH 17/20] attempt to fix missing group behavior for iscsi target creation --- src/driver/freenas/index.js | 139 ++++++++++++++++++++++++++++++++++++ src/driver/index.js | 20 +++++- 2 files changed, 158 insertions(+), 1 deletion(-) diff --git a/src/driver/freenas/index.js b/src/driver/freenas/index.js index 813a3c4..80b7e27 100644 --- a/src/driver/freenas/index.js +++ b/src/driver/freenas/index.js @@ -1,6 +1,7 @@ const { ControllerZfsSshBaseDriver } = require("../controller-zfs-ssh"); const { GrpcError, grpc } = require("../../utils/grpc"); const HttpClient = require("./http").Client; +const sleep = require("../../utils/general").sleep; const Handlebars = require("handlebars"); @@ -955,6 +956,47 @@ class FreeNASDriver extends ControllerZfsSshBaseDriver { ); } + // handle situations/race conditions where groups failed to be added/created on the target + // groups":[{"portal":1,"initiator":1,"auth":null,"authmethod":"NONE"},{"portal":2,"initiator":1,"auth":null,"authmethod":"NONE"}] + // TODO: this logic could be more intelligent but this should do for now as it appears in the failure scenario no groups are added + // in other words, I have never seen them invalid, only omitted so this should be enough + if (target.groups.length != targetGroups.length) { + response = await httpClient.put(`/iscsi/target/id/${target.id}`, { + groups: targetGroups, + }); + + if (response.statusCode != 200) { + throw new GrpcError( + grpc.status.UNKNOWN, + `failed setting target groups` + ); + } else { + target = response.body; + + // re-run sanity checks + if (!target) { + throw new GrpcError( + grpc.status.UNKNOWN, + `unknown error creating iscsi target` + ); + } + + if (target.name != iscsiName) { + throw new GrpcError( + grpc.status.UNKNOWN, + `mismatch name error creating iscsi target` + ); + } + + if (target.groups.length != targetGroups.length) { + throw new GrpcError( + grpc.status.UNKNOWN, + `failed setting target groups` + ); + } + } + } + this.ctx.logger.verbose("FreeNAS ISCSI TARGET: %j", target); // set target.id on zvol @@ -1524,6 +1566,103 @@ class FreeNASDriver extends ControllerZfsSshBaseDriver { } } + async failedAttachHelper(call, err) { + const driverShareType = this.getDriverShareType(); + const sshClient = this.getSshClient(); + let response; + + // not fully implemented + return; + + switch (driverShareType) { + case "iscsi": + const isScale = await this.getIsScale(); + const majorMinor = await this.getSystemVersionMajorMinor(); + + // only works for BSD-based and 11.3+ + if (!isScale && majorMinor >= 11.3) { + const sudoEnabled = this.getSudoEnabled(); + const sudoPath = await this.getSudoPath(); + let command; + + //19 - encountered non-retryable iSCSI login failure + // ^ could be missing groups on the target + + //cat /var/run/ctld.pid + // ps -p | grep ctld + // ps -p `cat /var/run/ctld.pid` | grep ctld (if 0 exit status it's running, otherwise no) + + // random settle time + // this could be getting invoked by other instances of the same 
controller + // or other deployments of controllers in the same of different clusters + // altogether + let maxSettleTime = 10000; + let settleTime = Math.floor(Math.random() * maxSettleTime + 1); + await sleep(settleTime); + + // test if config is bad + // if so regen + command = sshClient.buildCommand("/usr/sbin/ctld", ["-d"]); + if (sudoEnabled) { + command = sudoPath + " " + command; + } + + this.ctx.logger.verbose("FailedAttachHelper command: %s", command); + + response = await sshClient.exec(command); + let configError = false; + let serviceRunning = false; + if (response.stderr.includes("configuration error")) { + configError = true; + } + + // NOTE: this will not be in the output if the config file has an error + if (response.stderr.includes("daemon already running")) { + serviceRunning = true; + } + + if (configError) { + this.ctx.logger.warn( + "FailedAttachHelper: ctld appears to have a bad configuration file, attempting to regenerate" + ); + // regen config + // midclt call etc.generate ctld + command = sshClient.buildCommand("midclt", [ + "call", + "etc.generate", + "ctld", + ]); + if (sudoEnabled) { + command = sudoPath + " " + command; + } + + this.ctx.logger.verbose("FailedAttachHelper command: %s", command); + response = await sshClient.exec(command); + + // reload service (may not be enough) + command = sshClient.buildCommand("/etc/rc.d/ctld", ["reload"]); + if (sudoEnabled) { + command = sudoPath + " " + command; + } + + this.ctx.logger.verbose("FailedAttachHelper command: %s", command); + response = await sshClient.exec(command); + + } + + // note, when the 'bad' state is entered, the status still shows as running + // check if service is running + // /etc/rc.d/ctld status ...exits 0 if running + //command = sshClient.buildCommand("/etc/rc.d/ctld", ["reload"]); + + // if service is not running attempt a restart + // /etc/rc.d/ctld restart + //command = sshClient.buildCommand("/etc/rc.d/ctld", ["reload"]); + } + break; + } + } + async getApiVersion() { const systemVersion = await this.getSystemVersion(); diff --git a/src/driver/index.js b/src/driver/index.js index a5ad8fa..27bc506 100644 --- a/src/driver/index.js +++ b/src/driver/index.js @@ -356,7 +356,15 @@ class CsiBaseDriver { nodeDB ); // login - await iscsi.iscsiadm.login(volume_context.iqn, portal); + try { + await iscsi.iscsiadm.login(volume_context.iqn, portal); + } catch (err) { + if (typeof this.failedAttachHelper === "function") { + // no need to await this + this.failedAttachHelper(call, err); + } + throw err; + } // find device name device = `/dev/disk/by-path/ip-${portal}-iscsi-${volume_context.iqn}-lun-${volume_context.lun}`; @@ -378,6 +386,16 @@ class CsiBaseDriver { let current_time = Math.round(new Date().getTime() / 1000); if (!result && current_time - timer_start > timer_max) { + if (typeof this.failedAttachHelper === "function") { + // no need to await this + this.failedAttachHelper( + call, + new Error( + `hit timeout waiting for device node to appear: ${device}` + ) + ); + } + driver.ctx.logger.warn( `hit timeout waiting for device node to appear: ${device}` ); From 80abc76f66248e28581948ff5c08bc81b1a1a6ad Mon Sep 17 00:00:00 2001 From: Travis Glenn Hansen Date: Wed, 2 Dec 2020 18:37:31 -0700 Subject: [PATCH 18/20] improved logic for the various race conditions from FreeNAS api --- Dockerfile | 12 +- contrib/ctld-config-watchdog.sh | 19 + contrib/ctld-service-watchdog.sh | 16 + docker/mount | 7 + docker/umount | 7 + src/driver/freenas/index.js | 1255 +++++++++++++++--------------- 
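The failure hook wired into the node attach path above reduces to the following shape; driver stands in for the CsiBaseDriver instance and everything around the login call has been trimmed.

// Attempt the iSCSI login; on failure give the driver a chance to run its
// best-effort remediation (e.g. regenerating the ctld config) without
// blocking the request or swallowing the original error.
async function loginWithFailureHook(driver, iscsi, call, iqn, portal) {
  try {
    await iscsi.iscsiadm.login(iqn, portal);
  } catch (err) {
    if (typeof driver.failedAttachHelper === "function") {
      // intentionally not awaited: remediation is fire-and-forget
      driver.failedAttachHelper(call, err);
    }
    throw err;
  }
}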
src/driver/index.js | 20 +- 7 files changed, 683 insertions(+), 653 deletions(-) create mode 100644 contrib/ctld-config-watchdog.sh create mode 100644 contrib/ctld-service-watchdog.sh create mode 100644 docker/mount create mode 100644 docker/umount diff --git a/Dockerfile b/Dockerfile index 3972079..66655eb 100644 --- a/Dockerfile +++ b/Dockerfile @@ -59,8 +59,10 @@ ENV PATH=/usr/local/lib/nodejs/bin:$PATH COPY --from=build /usr/local/lib/nodejs /usr/local/lib/nodejs # node service requirements +# netbase is required by rpcbind/rpcinfo to work properly +# /etc/{services,rpc} are required RUN apt-get update && \ - apt-get install -y e2fsprogs xfsprogs fatresize dosfstools nfs-common cifs-utils sudo && \ + apt-get install -y netbase e2fsprogs xfsprogs fatresize dosfstools nfs-common cifs-utils sudo && \ rm -rf /var/lib/apt/lists/* # controller requirements @@ -75,6 +77,14 @@ RUN chmod +x /usr/local/sbin/iscsiadm ADD docker/multipath /usr/local/sbin RUN chmod +x /usr/local/sbin/multipath +## USE_HOST_MOUNT_TOOLS=1 +ADD docker/mount /usr/local/bin/mount +RUN chmod +x /usr/local/bin/mount + +## USE_HOST_MOUNT_TOOLS=1 +ADD docker/umount /usr/local/bin/umount +RUN chmod +x /usr/local/bin/umount + # Run as a non-root user RUN useradd --create-home csi \ && chown -R csi: /home/csi diff --git a/contrib/ctld-config-watchdog.sh b/contrib/ctld-config-watchdog.sh new file mode 100644 index 0000000..4582630 --- /dev/null +++ b/contrib/ctld-config-watchdog.sh @@ -0,0 +1,19 @@ +#!/bin/bash + +# under certain circumstances high concurrency requests to the FreeNAS/TrueNAS +# API can result in an invalid /etc/ctl.conf written to disk +# this script attempts to mitigate those failures by forcing a rebuild of the +# file using info strictly from the sqlite DB + +# can test with this +# logger -t ctld "error in configuration file" + +while [ 1 ]; do + egrep -m 1 "ctld.*error in configuration file" <(tail -n 0 -F /var/log/messages) &>/dev/null + + echo "regen ctld config" + midclt call etc.generate ctld &>/dev/null + + echo "reload ctld service" + /etc/rc.d/ctld reload &>/dev/null +done diff --git a/contrib/ctld-service-watchdog.sh b/contrib/ctld-service-watchdog.sh new file mode 100644 index 0000000..a6152bf --- /dev/null +++ b/contrib/ctld-service-watchdog.sh @@ -0,0 +1,16 @@ +#!/bin/bash + +# watch the ctld pid file and ensure the service is actually running + +while [ 1 ]; do + sleep 5 + ps -p $(cat /var/run/ctld.pid) | grep ctld &>/dev/null || { + echo "ctld not running, restarting" + + echo "regen ctld config" + midclt call etc.generate ctld &>/dev/null + + echo "restart ctld service" + /etc/rc.d/ctld restart &>/dev/null + } +done diff --git a/docker/mount b/docker/mount new file mode 100644 index 0000000..b53e3b2 --- /dev/null +++ b/docker/mount @@ -0,0 +1,7 @@ +#!/bin/bash + +if [[ ${USE_HOST_MOUNT_TOOLS} -eq 1 ]];then + chroot /host /usr/bin/env -i PATH="/sbin:/bin:/usr/bin:/usr/sbin" mount "${@:1}" +else + /usr/bin/env -i PATH="/sbin:/bin:/usr/bin:/usr/sbin" mount "${@:1}" +fi diff --git a/docker/umount b/docker/umount new file mode 100644 index 0000000..27b11d6 --- /dev/null +++ b/docker/umount @@ -0,0 +1,7 @@ +#!/bin/bash + +if [[ ${USE_HOST_MOUNT_TOOLS} -eq 1 ]];then + chroot /host /usr/bin/env -i PATH="/sbin:/bin:/usr/bin:/usr/sbin" umount "${@:1}" +else + /usr/bin/env -i PATH="/sbin:/bin:/usr/bin:/usr/sbin" umount "${@:1}" +fi diff --git a/src/driver/freenas/index.js b/src/driver/freenas/index.js index 80b7e27..64ce63a 100644 --- a/src/driver/freenas/index.js +++ b/src/driver/freenas/index.js @@ 
-1,7 +1,6 @@ const { ControllerZfsSshBaseDriver } = require("../controller-zfs-ssh"); const { GrpcError, grpc } = require("../../utils/grpc"); const HttpClient = require("./http").Client; -const sleep = require("../../utils/general").sleep; const Handlebars = require("handlebars"); @@ -170,6 +169,7 @@ class FreeNASDriver extends ControllerZfsSshBaseDriver { const apiVersion = httpClient.getApiVersion(); const zb = await this.getZetabyte(); + let volume_context; let properties; let endpoint; let response; @@ -312,29 +312,27 @@ class FreeNASDriver extends ControllerZfsSshBaseDriver { ); } } - - let volume_context = { - node_attach_driver: "nfs", - server: this.options.nfs.shareHost, - share: properties.mountpoint.value, - }; - return volume_context; - + break; default: throw new GrpcError( grpc.status.FAILED_PRECONDITION, `invalid configuration: unknown apiVersion ${apiVersion}` ); } - } else { - let volume_context = { - node_attach_driver: "nfs", - server: this.options.nfs.shareHost, - share: properties.mountpoint.value, - }; - return volume_context; } + + volume_context = { + node_attach_driver: "nfs", + server: this.options.nfs.shareHost, + share: properties.mountpoint.value, + }; + return volume_context; + break; + /** + * TODO: smb need to be more defensive like iscsi and nfs + * ensuring the path is valid and the shareName + */ case "smb": properties = await zb.zfs.get(datasetName, [ "mountpoint", @@ -460,6 +458,38 @@ class FreeNASDriver extends ControllerZfsSshBaseDriver { * v2 = 200 */ if ([200, 201].includes(response.statusCode)) { + share = response.body; + let sharePath; + let shareName; + switch (apiVersion) { + case 1: + sharePath = response.body.cifs_path; + shareName = response.body.cifs_name; + break; + case 2: + sharePath = response.body.path; + shareName = response.body.name; + break; + } + + if (shareName != smbName) { + throw new GrpcError( + grpc.status.UNKNOWN, + `FreeNAS responded with incorrect share data: ${ + response.statusCode + } body: ${JSON.stringify(response.body)}` + ); + } + + if (sharePath != properties.mountpoint.value) { + throw new GrpcError( + grpc.status.UNKNOWN, + `FreeNAS responded with incorrect share data: ${ + response.statusCode + } body: ${JSON.stringify(response.body)}` + ); + } + //set zfs property await zb.zfs.set(datasetName, { [FREENAS_SMB_SHARE_PROPERTY_NAME]: response.body.id, @@ -472,11 +502,39 @@ class FreeNASDriver extends ControllerZfsSshBaseDriver { if ( [409, 422].includes(response.statusCode) && JSON.stringify(response.body).includes( - "You can't share same filesystem with all hosts twice." + "A share with this name already exists." 
) ) { - // move along - // TODO: need to set the shareId here for sure + let lookupShare = await this.findResourceByProperties( + endpoint, + (item) => { + if ( + (item.cifs_path && + item.cifs_path == properties.mountpoint.value && + item.cifs_name && + item.cifs_name == smbName) || + (item.path && + item.path == properties.mountpoint.value && + item.name && + item.name == smbName) + ) { + return true; + } + return false; + } + ); + + if (!lookupShare) { + throw new GrpcError( + grpc.status.UNKNOWN, + `FreeNAS failed to find matching share` + ); + } + + //set zfs property + await zb.zfs.set(datasetName, { + [FREENAS_SMB_SHARE_PROPERTY_NAME]: lookupShare.id, + }); } else { throw new GrpcError( grpc.status.UNKNOWN, @@ -486,28 +544,22 @@ class FreeNASDriver extends ControllerZfsSshBaseDriver { ); } } - - let volume_context = { - node_attach_driver: "smb", - server: this.options.smb.shareHost, - share: smbName, - }; - return volume_context; - + break; default: throw new GrpcError( grpc.status.FAILED_PRECONDITION, `invalid configuration: unknown apiVersion ${apiVersion}` ); } - } else { - let volume_context = { - node_attach_driver: "smb", - server: this.options.smb.shareHost, - share: smbName, - }; - return volume_context; } + + volume_context = { + node_attach_driver: "smb", + server: this.options.smb.shareHost, + share: smbName, + }; + return volume_context; + break; case "iscsi": properties = await zb.zfs.get(datasetName, [ @@ -599,7 +651,7 @@ class FreeNASDriver extends ControllerZfsSshBaseDriver { } switch (apiVersion) { - case 1: { + case 1: response = await httpClient.get( "/services/iscsi/globalconfiguration" ); @@ -613,270 +665,7 @@ class FreeNASDriver extends ControllerZfsSshBaseDriver { } basename = response.body.iscsi_basename; this.ctx.logger.verbose("FreeNAS ISCSI BASENAME: " + basename); - - // create target - let target = { - iscsi_target_name: iscsiName, - iscsi_target_alias: "", // TODO: allow template for this - }; - - response = await httpClient.post("/services/iscsi/target", target); - - // 409 if invalid - if (response.statusCode != 201) { - target = null; - if ( - response.statusCode == 409 && - JSON.stringify(response.body).includes( - "Target name already exists" - ) - ) { - target = await this.findResourceByProperties( - "/services/iscsi/target", - { - iscsi_target_name: iscsiName, - } - ); - } else { - throw new GrpcError( - grpc.status.UNKNOWN, - `received error creating iscsi target - code: ${ - response.statusCode - } body: ${JSON.stringify(response.body)}` - ); - } - } else { - target = response.body; - } - - if (!target) { - throw new GrpcError( - grpc.status.UNKNOWN, - `unknown error creating iscsi target` - ); - } - - if (target.iscsi_target_name != iscsiName) { - throw new GrpcError( - grpc.status.UNKNOWN, - `mismatch name error creating iscsi target` - ); - } - - this.ctx.logger.verbose("FreeNAS ISCSI TARGET: %j", target); - - // set target.id on zvol - await zb.zfs.set(datasetName, { - [FREENAS_ISCSI_TARGET_ID_PROPERTY_NAME]: target.id, - }); - - // create targetgroup(s) - // targetgroups do have IDs - for (let targetGroupConfig of this.options.iscsi.targetGroups) { - let targetGroup = { - iscsi_target: target.id, - iscsi_target_authgroup: targetGroupConfig.targetGroupAuthGroup, - iscsi_target_authtype: targetGroupConfig.targetGroupAuthType - ? 
targetGroupConfig.targetGroupAuthType - : "None", - iscsi_target_portalgroup: - targetGroupConfig.targetGroupPortalGroup, - iscsi_target_initiatorgroup: - targetGroupConfig.targetGroupInitiatorGroup, - iscsi_target_initialdigest: "Auto", - }; - response = await httpClient.post( - "/services/iscsi/targetgroup", - targetGroup - ); - - // 409 if invalid - if (response.statusCode != 201) { - targetGroup = null; - /** - * 404 gets returned with an unable to process response when the DB is corrupted (has invalid entries in essense) - * - * To resolve properly the DB should be cleaned up - * /usr/local/etc/rc.d/django stop - * /usr/local/etc/rc.d/nginx stop - * sqlite3 /data/freenas-v1.db - * - * // this deletes everything, probably not what you want - * // should have a better query to only find entries where associated assets no longer exist - * DELETE from services_iscsitargetgroups; - * - * /usr/local/etc/rc.d/django restart - * /usr/local/etc/rc.d/nginx restart - */ - if ( - response.statusCode == 404 || - (response.statusCode == 409 && - JSON.stringify(response.body).includes( - "cannot be duplicated on a target" - )) - ) { - targetGroup = await this.findResourceByProperties( - "/services/iscsi/targetgroup", - { - iscsi_target: target.id, - iscsi_target_portalgroup: - targetGroupConfig.targetGroupPortalGroup, - iscsi_target_initiatorgroup: - targetGroupConfig.targetGroupInitiatorGroup, - } - ); - } else { - throw new GrpcError( - grpc.status.UNKNOWN, - `received error creating iscsi targetgroup - code: ${ - response.statusCode - } body: ${JSON.stringify(response.body)}` - ); - } - } else { - targetGroup = response.body; - } - - if (!targetGroup) { - throw new GrpcError( - grpc.status.UNKNOWN, - `unknown error creating iscsi targetgroup` - ); - } - - this.ctx.logger.verbose( - "FreeNAS ISCSI TARGET_GROUP: %j", - targetGroup - ); - } - - let extent = { - iscsi_target_extent_comment: "", // TODO: allow template for this value - iscsi_target_extent_type: "Disk", // Disk/File, after save Disk becomes "ZVOL" - iscsi_target_extent_name: iscsiName, - iscsi_target_extent_insecure_tpc: extentInsecureTpc, - //iscsi_target_extent_naa: "0x3822690834aae6c5", - iscsi_target_extent_disk: extentDiskName, - iscsi_target_extent_xen: extentXenCompat, - iscsi_target_extent_avail_threshold: extentAvailThreshold, - iscsi_target_extent_blocksize: Number(extentBlocksize), - iscsi_target_extent_pblocksize: extentDisablePhysicalBlocksize, - iscsi_target_extent_rpm: isNaN(Number(extentRpm)) - ? 
"SSD" - : Number(extentRpm), - iscsi_target_extent_ro: false, - }; - response = await httpClient.post("/services/iscsi/extent", extent); - - // 409 if invalid - if (response.statusCode != 201) { - extent = null; - if ( - response.statusCode == 409 && - JSON.stringify(response.body).includes( - "Extent name must be unique" - ) - ) { - extent = await this.findResourceByProperties( - "/services/iscsi/extent", - { iscsi_target_extent_name: iscsiName } - ); - } else { - throw new GrpcError( - grpc.status.UNKNOWN, - `received error creating iscsi extent - code: ${ - response.statusCode - } body: ${JSON.stringify(response.body)}` - ); - } - } else { - extent = response.body; - } - - if (!extent) { - throw new GrpcError( - grpc.status.UNKNOWN, - `unknown error creating iscsi extent` - ); - } - - if (extent.iscsi_target_extent_name != iscsiName) { - throw new GrpcError( - grpc.status.UNKNOWN, - `mismatch name error creating iscsi extent` - ); - } - - this.ctx.logger.verbose("FreeNAS ISCSI EXTENT: %j", extent); - - await zb.zfs.set(datasetName, { - [FREENAS_ISCSI_EXTENT_ID_PROPERTY_NAME]: extent.id, - }); - - // create targettoextent - let targetToExtent = { - iscsi_target: target.id, - iscsi_extent: extent.id, - iscsi_lunid: 0, - }; - response = await httpClient.post( - "/services/iscsi/targettoextent", - targetToExtent - ); - - // 409 if invalid - if (response.statusCode != 201) { - targetToExtent = null; - - // LUN ID is already being used for this target. - // Extent is already in this target. - if ( - response.statusCode == 409 && - (JSON.stringify(response.body).includes( - "Extent is already in this target." - ) || - JSON.stringify(response.body).includes( - "LUN ID is already being used for this target." - )) - ) { - targetToExtent = await this.findResourceByProperties( - "/services/iscsi/targettoextent", - { - iscsi_target: target.id, - iscsi_extent: extent.id, - iscsi_lunid: 0, - } - ); - } else { - throw new GrpcError( - grpc.status.UNKNOWN, - `received error creating iscsi targettoextent - code: ${ - response.statusCode - } body: ${JSON.stringify(response.body)}` - ); - } - } else { - targetToExtent = response.body; - } - - if (!targetToExtent) { - throw new GrpcError( - grpc.status.UNKNOWN, - `unknown error creating iscsi targettoextent` - ); - } - this.ctx.logger.verbose( - "FreeNAS ISCSI TARGET_TO_EXTENT: %j", - targetToExtent - ); - - await zb.zfs.set(datasetName, { - [FREENAS_ISCSI_TARGETTOEXTENT_ID_PROPERTY_NAME]: - targetToExtent.id, - }); - break; - } case 2: response = await httpClient.get("/iscsi/global"); if (response.statusCode != 200) { @@ -889,243 +678,6 @@ class FreeNASDriver extends ControllerZfsSshBaseDriver { } basename = response.body.basename; this.ctx.logger.verbose("FreeNAS ISCSI BASENAME: " + basename); - - // create target and targetgroup - //let targetId; - let targetGroups = []; - for (let targetGroupConfig of this.options.iscsi.targetGroups) { - targetGroups.push({ - portal: targetGroupConfig.targetGroupPortalGroup, - initiator: targetGroupConfig.targetGroupInitiatorGroup, - auth: - targetGroupConfig.targetGroupAuthGroup > 0 - ? targetGroupConfig.targetGroupAuthGroup - : null, - authmethod: - targetGroupConfig.targetGroupAuthType.length > 0 - ? 
targetGroupConfig.targetGroupAuthType - .toUpperCase() - .replace(" ", "_") - : "NONE", - }); - } - let target = { - name: iscsiName, - alias: null, // cannot send "" error: handler error - driver: FreeNASDriver method: CreateVolume error: {"name":"GrpcError","code":2,"message":"received error creating iscsi target - code: 422 body: {\"iscsi_target_create.alias\":[{\"message\":\"Alias already exists\",\"errno\":22}]}"} - mode: "ISCSI", - groups: targetGroups, - }; - - response = await httpClient.post("/iscsi/target", target); - - // 409 if invalid - if (response.statusCode != 200) { - target = null; - if ( - response.statusCode == 422 && - JSON.stringify(response.body).includes( - "Target name already exists" - ) - ) { - target = await this.findResourceByProperties("/iscsi/target", { - name: iscsiName, - }); - } else { - throw new GrpcError( - grpc.status.UNKNOWN, - `received error creating iscsi target - code: ${ - response.statusCode - } body: ${JSON.stringify(response.body)}` - ); - } - } else { - target = response.body; - } - - if (!target) { - throw new GrpcError( - grpc.status.UNKNOWN, - `unknown error creating iscsi target` - ); - } - - if (target.name != iscsiName) { - throw new GrpcError( - grpc.status.UNKNOWN, - `mismatch name error creating iscsi target` - ); - } - - // handle situations/race conditions where groups failed to be added/created on the target - // groups":[{"portal":1,"initiator":1,"auth":null,"authmethod":"NONE"},{"portal":2,"initiator":1,"auth":null,"authmethod":"NONE"}] - // TODO: this logic could be more intelligent but this should do for now as it appears in the failure scenario no groups are added - // in other words, I have never seen them invalid, only omitted so this should be enough - if (target.groups.length != targetGroups.length) { - response = await httpClient.put(`/iscsi/target/id/${target.id}`, { - groups: targetGroups, - }); - - if (response.statusCode != 200) { - throw new GrpcError( - grpc.status.UNKNOWN, - `failed setting target groups` - ); - } else { - target = response.body; - - // re-run sanity checks - if (!target) { - throw new GrpcError( - grpc.status.UNKNOWN, - `unknown error creating iscsi target` - ); - } - - if (target.name != iscsiName) { - throw new GrpcError( - grpc.status.UNKNOWN, - `mismatch name error creating iscsi target` - ); - } - - if (target.groups.length != targetGroups.length) { - throw new GrpcError( - grpc.status.UNKNOWN, - `failed setting target groups` - ); - } - } - } - - this.ctx.logger.verbose("FreeNAS ISCSI TARGET: %j", target); - - // set target.id on zvol - await zb.zfs.set(datasetName, { - [FREENAS_ISCSI_TARGET_ID_PROPERTY_NAME]: target.id, - }); - - let extent = { - comment: "", // TODO: allow this to be templated - type: "DISK", // Disk/File, after save Disk becomes "ZVOL" - name: iscsiName, - //iscsi_target_extent_naa: "0x3822690834aae6c5", - disk: extentDiskName, - insecure_tpc: extentInsecureTpc, - xen: extentXenCompat, - avail_threshold: extentAvailThreshold, - blocksize: Number(extentBlocksize), - pblocksize: extentDisablePhysicalBlocksize, - rpm: "" + extentRpm, // should be a string - ro: false, - }; - - response = await httpClient.post("/iscsi/extent", extent); - - // 409 if invalid - if (response.statusCode != 200) { - extent = null; - if ( - response.statusCode == 422 && - JSON.stringify(response.body).includes( - "Extent name must be unique" - ) - ) { - extent = await this.findResourceByProperties("/iscsi/extent", { - name: iscsiName, - }); - } else { - throw new GrpcError( - grpc.status.UNKNOWN, 
- `received error creating iscsi extent - code: ${ - response.statusCode - } body: ${JSON.stringify(response.body)}` - ); - } - } else { - extent = response.body; - } - - if (!extent) { - throw new GrpcError( - grpc.status.UNKNOWN, - `unknown error creating iscsi extent` - ); - } - - if (extent.name != iscsiName) { - throw new GrpcError( - grpc.status.UNKNOWN, - `mismatch name error creating iscsi extent` - ); - } - - this.ctx.logger.verbose("FreeNAS ISCSI EXTENT: %j", extent); - - await zb.zfs.set(datasetName, { - [FREENAS_ISCSI_EXTENT_ID_PROPERTY_NAME]: extent.id, - }); - - // create targettoextent - let targetToExtent = { - target: target.id, - extent: extent.id, - lunid: 0, - }; - response = await httpClient.post( - "/iscsi/targetextent", - targetToExtent - ); - - if (response.statusCode != 200) { - targetToExtent = null; - - // LUN ID is already being used for this target. - // Extent is already in this target. - if ( - response.statusCode == 422 && - (JSON.stringify(response.body).includes( - "Extent is already in this target." - ) || - JSON.stringify(response.body).includes( - "LUN ID is already being used for this target." - )) - ) { - targetToExtent = await this.findResourceByProperties( - "/iscsi/targetextent", - { - target: target.id, - extent: extent.id, - lunid: 0, - } - ); - } else { - throw new GrpcError( - grpc.status.UNKNOWN, - `received error creating iscsi targetextent - code: ${ - response.statusCode - } body: ${JSON.stringify(response.body)}` - ); - } - } else { - targetToExtent = response.body; - } - - if (!targetToExtent) { - throw new GrpcError( - grpc.status.UNKNOWN, - `unknown error creating iscsi targetextent` - ); - } - this.ctx.logger.verbose( - "FreeNAS ISCSI TARGET_TO_EXTENT: %j", - targetToExtent - ); - - await zb.zfs.set(datasetName, { - [FREENAS_ISCSI_TARGETTOEXTENT_ID_PROPERTY_NAME]: - targetToExtent.id, - }); - break; default: throw new GrpcError( @@ -1134,6 +686,540 @@ class FreeNASDriver extends ControllerZfsSshBaseDriver { ); } + // if we got all the way to the TARGETTOEXTENT then we fully finished + // otherwise we must do all assets every time due to the interdependence of IDs etc + if ( + !zb.helpers.isPropertyValueSet( + properties[FREENAS_ISCSI_TARGETTOEXTENT_ID_PROPERTY_NAME].value + ) + ) { + switch (apiVersion) { + case 1: { + // create target + let target = { + iscsi_target_name: iscsiName, + iscsi_target_alias: "", // TODO: allow template for this + }; + + response = await httpClient.post( + "/services/iscsi/target", + target + ); + + // 409 if invalid + if (response.statusCode != 201) { + target = null; + if ( + response.statusCode == 409 && + JSON.stringify(response.body).includes( + "Target name already exists" + ) + ) { + target = await this.findResourceByProperties( + "/services/iscsi/target", + { + iscsi_target_name: iscsiName, + } + ); + } else { + throw new GrpcError( + grpc.status.UNKNOWN, + `received error creating iscsi target - code: ${ + response.statusCode + } body: ${JSON.stringify(response.body)}` + ); + } + } else { + target = response.body; + } + + if (!target) { + throw new GrpcError( + grpc.status.UNKNOWN, + `unknown error creating iscsi target` + ); + } + + if (target.iscsi_target_name != iscsiName) { + throw new GrpcError( + grpc.status.UNKNOWN, + `mismatch name error creating iscsi target` + ); + } + + this.ctx.logger.verbose("FreeNAS ISCSI TARGET: %j", target); + + // set target.id on zvol + await zb.zfs.set(datasetName, { + [FREENAS_ISCSI_TARGET_ID_PROPERTY_NAME]: target.id, + }); + + // create targetgroup(s) + // 
targetgroups do have IDs + for (let targetGroupConfig of this.options.iscsi.targetGroups) { + let targetGroup = { + iscsi_target: target.id, + iscsi_target_authgroup: + targetGroupConfig.targetGroupAuthGroup, + iscsi_target_authtype: targetGroupConfig.targetGroupAuthType + ? targetGroupConfig.targetGroupAuthType + : "None", + iscsi_target_portalgroup: + targetGroupConfig.targetGroupPortalGroup, + iscsi_target_initiatorgroup: + targetGroupConfig.targetGroupInitiatorGroup, + iscsi_target_initialdigest: "Auto", + }; + response = await httpClient.post( + "/services/iscsi/targetgroup", + targetGroup + ); + + // 409 if invalid + if (response.statusCode != 201) { + targetGroup = null; + /** + * 404 gets returned with an unable to process response when the DB is corrupted (has invalid entries in essense) + * + * To resolve properly the DB should be cleaned up + * /usr/local/etc/rc.d/django stop + * /usr/local/etc/rc.d/nginx stop + * sqlite3 /data/freenas-v1.db + * + * // this deletes everything, probably not what you want + * // should have a better query to only find entries where associated assets no longer exist + * DELETE from services_iscsitargetgroups; + * + * /usr/local/etc/rc.d/django restart + * /usr/local/etc/rc.d/nginx restart + */ + if ( + response.statusCode == 404 || + (response.statusCode == 409 && + JSON.stringify(response.body).includes( + "cannot be duplicated on a target" + )) + ) { + targetGroup = await this.findResourceByProperties( + "/services/iscsi/targetgroup", + { + iscsi_target: target.id, + iscsi_target_portalgroup: + targetGroupConfig.targetGroupPortalGroup, + iscsi_target_initiatorgroup: + targetGroupConfig.targetGroupInitiatorGroup, + } + ); + } else { + throw new GrpcError( + grpc.status.UNKNOWN, + `received error creating iscsi targetgroup - code: ${ + response.statusCode + } body: ${JSON.stringify(response.body)}` + ); + } + } else { + targetGroup = response.body; + } + + if (!targetGroup) { + throw new GrpcError( + grpc.status.UNKNOWN, + `unknown error creating iscsi targetgroup` + ); + } + + this.ctx.logger.verbose( + "FreeNAS ISCSI TARGET_GROUP: %j", + targetGroup + ); + } + + let extent = { + iscsi_target_extent_comment: "", // TODO: allow template for this value + iscsi_target_extent_type: "Disk", // Disk/File, after save Disk becomes "ZVOL" + iscsi_target_extent_name: iscsiName, + iscsi_target_extent_insecure_tpc: extentInsecureTpc, + //iscsi_target_extent_naa: "0x3822690834aae6c5", + iscsi_target_extent_disk: extentDiskName, + iscsi_target_extent_xen: extentXenCompat, + iscsi_target_extent_avail_threshold: extentAvailThreshold, + iscsi_target_extent_blocksize: Number(extentBlocksize), + iscsi_target_extent_pblocksize: extentDisablePhysicalBlocksize, + iscsi_target_extent_rpm: isNaN(Number(extentRpm)) + ? 
"SSD" + : Number(extentRpm), + iscsi_target_extent_ro: false, + }; + response = await httpClient.post( + "/services/iscsi/extent", + extent + ); + + // 409 if invalid + if (response.statusCode != 201) { + extent = null; + if ( + response.statusCode == 409 && + JSON.stringify(response.body).includes( + "Extent name must be unique" + ) + ) { + extent = await this.findResourceByProperties( + "/services/iscsi/extent", + { iscsi_target_extent_name: iscsiName } + ); + } else { + throw new GrpcError( + grpc.status.UNKNOWN, + `received error creating iscsi extent - code: ${ + response.statusCode + } body: ${JSON.stringify(response.body)}` + ); + } + } else { + extent = response.body; + } + + if (!extent) { + throw new GrpcError( + grpc.status.UNKNOWN, + `unknown error creating iscsi extent` + ); + } + + if (extent.iscsi_target_extent_name != iscsiName) { + throw new GrpcError( + grpc.status.UNKNOWN, + `mismatch name error creating iscsi extent` + ); + } + + this.ctx.logger.verbose("FreeNAS ISCSI EXTENT: %j", extent); + + await zb.zfs.set(datasetName, { + [FREENAS_ISCSI_EXTENT_ID_PROPERTY_NAME]: extent.id, + }); + + // create targettoextent + let targetToExtent = { + iscsi_target: target.id, + iscsi_extent: extent.id, + iscsi_lunid: 0, + }; + response = await httpClient.post( + "/services/iscsi/targettoextent", + targetToExtent + ); + + // 409 if invalid + if (response.statusCode != 201) { + targetToExtent = null; + + // LUN ID is already being used for this target. + // Extent is already in this target. + if ( + response.statusCode == 409 && + (JSON.stringify(response.body).includes( + "Extent is already in this target." + ) || + JSON.stringify(response.body).includes( + "LUN ID is already being used for this target." + )) + ) { + targetToExtent = await this.findResourceByProperties( + "/services/iscsi/targettoextent", + { + iscsi_target: target.id, + iscsi_extent: extent.id, + iscsi_lunid: 0, + } + ); + } else { + throw new GrpcError( + grpc.status.UNKNOWN, + `received error creating iscsi targettoextent - code: ${ + response.statusCode + } body: ${JSON.stringify(response.body)}` + ); + } + } else { + targetToExtent = response.body; + } + + if (!targetToExtent) { + throw new GrpcError( + grpc.status.UNKNOWN, + `unknown error creating iscsi targettoextent` + ); + } + this.ctx.logger.verbose( + "FreeNAS ISCSI TARGET_TO_EXTENT: %j", + targetToExtent + ); + + await zb.zfs.set(datasetName, { + [FREENAS_ISCSI_TARGETTOEXTENT_ID_PROPERTY_NAME]: + targetToExtent.id, + }); + + break; + } + case 2: + // create target and targetgroup + //let targetId; + let targetGroups = []; + for (let targetGroupConfig of this.options.iscsi.targetGroups) { + targetGroups.push({ + portal: targetGroupConfig.targetGroupPortalGroup, + initiator: targetGroupConfig.targetGroupInitiatorGroup, + auth: + targetGroupConfig.targetGroupAuthGroup > 0 + ? targetGroupConfig.targetGroupAuthGroup + : null, + authmethod: + targetGroupConfig.targetGroupAuthType.length > 0 + ? 
targetGroupConfig.targetGroupAuthType + .toUpperCase() + .replace(" ", "_") + : "NONE", + }); + } + let target = { + name: iscsiName, + alias: null, // cannot send "" error: handler error - driver: FreeNASDriver method: CreateVolume error: {"name":"GrpcError","code":2,"message":"received error creating iscsi target - code: 422 body: {\"iscsi_target_create.alias\":[{\"message\":\"Alias already exists\",\"errno\":22}]}"} + mode: "ISCSI", + groups: targetGroups, + }; + + response = await httpClient.post("/iscsi/target", target); + + // 409 if invalid + if (response.statusCode != 200) { + target = null; + if ( + response.statusCode == 422 && + JSON.stringify(response.body).includes( + "Target name already exists" + ) + ) { + target = await this.findResourceByProperties( + "/iscsi/target", + { + name: iscsiName, + } + ); + } else { + throw new GrpcError( + grpc.status.UNKNOWN, + `received error creating iscsi target - code: ${ + response.statusCode + } body: ${JSON.stringify(response.body)}` + ); + } + } else { + target = response.body; + } + + if (!target) { + throw new GrpcError( + grpc.status.UNKNOWN, + `unknown error creating iscsi target` + ); + } + + if (target.name != iscsiName) { + throw new GrpcError( + grpc.status.UNKNOWN, + `mismatch name error creating iscsi target` + ); + } + + // handle situations/race conditions where groups failed to be added/created on the target + // groups":[{"portal":1,"initiator":1,"auth":null,"authmethod":"NONE"},{"portal":2,"initiator":1,"auth":null,"authmethod":"NONE"}] + // TODO: this logic could be more intelligent but this should do for now as it appears in the failure scenario no groups are added + // in other words, I have never seen them invalid, only omitted so this should be enough + if (target.groups.length != targetGroups.length) { + response = await httpClient.put( + `/iscsi/target/id/${target.id}`, + { + groups: targetGroups, + } + ); + + if (response.statusCode != 200) { + throw new GrpcError( + grpc.status.UNKNOWN, + `failed setting target groups` + ); + } else { + target = response.body; + + // re-run sanity checks + if (!target) { + throw new GrpcError( + grpc.status.UNKNOWN, + `unknown error creating iscsi target` + ); + } + + if (target.name != iscsiName) { + throw new GrpcError( + grpc.status.UNKNOWN, + `mismatch name error creating iscsi target` + ); + } + + if (target.groups.length != targetGroups.length) { + throw new GrpcError( + grpc.status.UNKNOWN, + `failed setting target groups` + ); + } + } + } + + this.ctx.logger.verbose("FreeNAS ISCSI TARGET: %j", target); + + // set target.id on zvol + await zb.zfs.set(datasetName, { + [FREENAS_ISCSI_TARGET_ID_PROPERTY_NAME]: target.id, + }); + + let extent = { + comment: "", // TODO: allow this to be templated + type: "DISK", // Disk/File, after save Disk becomes "ZVOL" + name: iscsiName, + //iscsi_target_extent_naa: "0x3822690834aae6c5", + disk: extentDiskName, + insecure_tpc: extentInsecureTpc, + xen: extentXenCompat, + avail_threshold: extentAvailThreshold, + blocksize: Number(extentBlocksize), + pblocksize: extentDisablePhysicalBlocksize, + rpm: "" + extentRpm, // should be a string + ro: false, + }; + + response = await httpClient.post("/iscsi/extent", extent); + + // 409 if invalid + if (response.statusCode != 200) { + extent = null; + if ( + response.statusCode == 422 && + JSON.stringify(response.body).includes( + "Extent name must be unique" + ) + ) { + extent = await this.findResourceByProperties( + "/iscsi/extent", + { + name: iscsiName, + } + ); + } else { + throw new GrpcError( 
+ grpc.status.UNKNOWN, + `received error creating iscsi extent - code: ${ + response.statusCode + } body: ${JSON.stringify(response.body)}` + ); + } + } else { + extent = response.body; + } + + if (!extent) { + throw new GrpcError( + grpc.status.UNKNOWN, + `unknown error creating iscsi extent` + ); + } + + if (extent.name != iscsiName) { + throw new GrpcError( + grpc.status.UNKNOWN, + `mismatch name error creating iscsi extent` + ); + } + + this.ctx.logger.verbose("FreeNAS ISCSI EXTENT: %j", extent); + + await zb.zfs.set(datasetName, { + [FREENAS_ISCSI_EXTENT_ID_PROPERTY_NAME]: extent.id, + }); + + // create targettoextent + let targetToExtent = { + target: target.id, + extent: extent.id, + lunid: 0, + }; + response = await httpClient.post( + "/iscsi/targetextent", + targetToExtent + ); + + if (response.statusCode != 200) { + targetToExtent = null; + + // LUN ID is already being used for this target. + // Extent is already in this target. + if ( + response.statusCode == 422 && + (JSON.stringify(response.body).includes( + "Extent is already in this target." + ) || + JSON.stringify(response.body).includes( + "LUN ID is already being used for this target." + )) + ) { + targetToExtent = await this.findResourceByProperties( + "/iscsi/targetextent", + { + target: target.id, + extent: extent.id, + lunid: 0, + } + ); + } else { + throw new GrpcError( + grpc.status.UNKNOWN, + `received error creating iscsi targetextent - code: ${ + response.statusCode + } body: ${JSON.stringify(response.body)}` + ); + } + } else { + targetToExtent = response.body; + } + + if (!targetToExtent) { + throw new GrpcError( + grpc.status.UNKNOWN, + `unknown error creating iscsi targetextent` + ); + } + this.ctx.logger.verbose( + "FreeNAS ISCSI TARGET_TO_EXTENT: %j", + targetToExtent + ); + + await zb.zfs.set(datasetName, { + [FREENAS_ISCSI_TARGETTOEXTENT_ID_PROPERTY_NAME]: + targetToExtent.id, + }); + + break; + default: + throw new GrpcError( + grpc.status.FAILED_PRECONDITION, + `invalid configuration: unknown apiVersion ${apiVersion}` + ); + } + } + // iqn = target let iqn = basename + ":" + iscsiName; this.ctx.logger.info("FreeNAS iqn: " + iqn); @@ -1157,7 +1243,7 @@ class FreeNASDriver extends ControllerZfsSshBaseDriver { // iqn // lun - let volume_context = { + volume_context = { node_attach_driver: "iscsi", portal: this.options.iscsi.targetPortal, portals: this.options.iscsi.targetPortals.join(","), @@ -1566,103 +1652,6 @@ class FreeNASDriver extends ControllerZfsSshBaseDriver { } } - async failedAttachHelper(call, err) { - const driverShareType = this.getDriverShareType(); - const sshClient = this.getSshClient(); - let response; - - // not fully implemented - return; - - switch (driverShareType) { - case "iscsi": - const isScale = await this.getIsScale(); - const majorMinor = await this.getSystemVersionMajorMinor(); - - // only works for BSD-based and 11.3+ - if (!isScale && majorMinor >= 11.3) { - const sudoEnabled = this.getSudoEnabled(); - const sudoPath = await this.getSudoPath(); - let command; - - //19 - encountered non-retryable iSCSI login failure - // ^ could be missing groups on the target - - //cat /var/run/ctld.pid - // ps -p | grep ctld - // ps -p `cat /var/run/ctld.pid` | grep ctld (if 0 exit status it's running, otherwise no) - - // random settle time - // this could be getting invoked by other instances of the same controller - // or other deployments of controllers in the same of different clusters - // altogether - let maxSettleTime = 10000; - let settleTime = Math.floor(Math.random() * 
maxSettleTime + 1); - await sleep(settleTime); - - // test if config is bad - // if so regen - command = sshClient.buildCommand("/usr/sbin/ctld", ["-d"]); - if (sudoEnabled) { - command = sudoPath + " " + command; - } - - this.ctx.logger.verbose("FailedAttachHelper command: %s", command); - - response = await sshClient.exec(command); - let configError = false; - let serviceRunning = false; - if (response.stderr.includes("configuration error")) { - configError = true; - } - - // NOTE: this will not be in the output if the config file has an error - if (response.stderr.includes("daemon already running")) { - serviceRunning = true; - } - - if (configError) { - this.ctx.logger.warn( - "FailedAttachHelper: ctld appears to have a bad configuration file, attempting to regenerate" - ); - // regen config - // midclt call etc.generate ctld - command = sshClient.buildCommand("midclt", [ - "call", - "etc.generate", - "ctld", - ]); - if (sudoEnabled) { - command = sudoPath + " " + command; - } - - this.ctx.logger.verbose("FailedAttachHelper command: %s", command); - response = await sshClient.exec(command); - - // reload service (may not be enough) - command = sshClient.buildCommand("/etc/rc.d/ctld", ["reload"]); - if (sudoEnabled) { - command = sudoPath + " " + command; - } - - this.ctx.logger.verbose("FailedAttachHelper command: %s", command); - response = await sshClient.exec(command); - - } - - // note, when the 'bad' state is entered, the status still shows as running - // check if service is running - // /etc/rc.d/ctld status ...exits 0 if running - //command = sshClient.buildCommand("/etc/rc.d/ctld", ["reload"]); - - // if service is not running attempt a restart - // /etc/rc.d/ctld restart - //command = sshClient.buildCommand("/etc/rc.d/ctld", ["reload"]); - } - break; - } - } - async getApiVersion() { const systemVersion = await this.getSystemVersion(); diff --git a/src/driver/index.js b/src/driver/index.js index 27bc506..a5ad8fa 100644 --- a/src/driver/index.js +++ b/src/driver/index.js @@ -356,15 +356,7 @@ class CsiBaseDriver { nodeDB ); // login - try { - await iscsi.iscsiadm.login(volume_context.iqn, portal); - } catch (err) { - if (typeof this.failedAttachHelper === "function") { - // no need to await this - this.failedAttachHelper(call, err); - } - throw err; - } + await iscsi.iscsiadm.login(volume_context.iqn, portal); // find device name device = `/dev/disk/by-path/ip-${portal}-iscsi-${volume_context.iqn}-lun-${volume_context.lun}`; @@ -386,16 +378,6 @@ class CsiBaseDriver { let current_time = Math.round(new Date().getTime() / 1000); if (!result && current_time - timer_start > timer_max) { - if (typeof this.failedAttachHelper === "function") { - // no need to await this - this.failedAttachHelper( - call, - new Error( - `hit timeout waiting for device node to appear: ${device}` - ) - ); - } - driver.ctx.logger.warn( `hit timeout waiting for device node to appear: ${device}` ); From 78ef440d2b38669512097dc63a07540ba8418f91 Mon Sep 17 00:00:00 2001 From: Travis Glenn Hansen Date: Thu, 3 Dec 2020 14:26:08 -0700 Subject: [PATCH 19/20] sample watchdog using db data, add socat to image for testing scenarios --- .dockerignore | 1 + Dockerfile | 2 +- contrib/ctld-config-watchdog-db.sh | 54 ++++++++++++++++++++++++++++++ 3 files changed, 56 insertions(+), 1 deletion(-) create mode 100644 contrib/ctld-config-watchdog-db.sh diff --git a/.dockerignore b/.dockerignore index 22353b8..3a13573 100644 --- a/.dockerignore +++ b/.dockerignore @@ -1,6 +1,7 @@ chart dev examples +contrib node_modules 
Dockerfile* TODO.md diff --git a/Dockerfile b/Dockerfile index 66655eb..d8bedbe 100644 --- a/Dockerfile +++ b/Dockerfile @@ -62,7 +62,7 @@ COPY --from=build /usr/local/lib/nodejs /usr/local/lib/nodejs # netbase is required by rpcbind/rpcinfo to work properly # /etc/{services,rpc} are required RUN apt-get update && \ - apt-get install -y netbase e2fsprogs xfsprogs fatresize dosfstools nfs-common cifs-utils sudo && \ + apt-get install -y netbase socat e2fsprogs xfsprogs fatresize dosfstools nfs-common cifs-utils sudo && \ rm -rf /var/lib/apt/lists/* # controller requirements diff --git a/contrib/ctld-config-watchdog-db.sh b/contrib/ctld-config-watchdog-db.sh new file mode 100644 index 0000000..9d55bff --- /dev/null +++ b/contrib/ctld-config-watchdog-db.sh @@ -0,0 +1,54 @@ +#!/bin/bash + +WAIT_TIME_SECS=30 +USERNAME="root" +PASSWORD="secret" +BASE_URL="http://localhost" +LIMIT=1000 + +while [ 1 ]; do + sleep "${WAIT_TIME_SECS}" + + # ctl extents + CTL_EXTENT_COUNT=$(ctladm devlist | tail -n +2 | wc -l | sed 's/^[ \t]*//;s/[ \t]*$//') + echo "ctl extent count: ${CTL_EXTENT_COUNT}" + + # ctl luns + CTL_LUN_COUNT=$(ctladm lunlist | wc -l | sed 's/^[ \t]*//;s/[ \t]*$//') + echo "ctl lun count: ${CTL_LUN_COUNT}" + + # db targets + DB_TARGET_COUNT=$(curl --user "${USERNAME}:${PASSWORD}" "${BASE_URL}/api/v2.0/iscsi/target?limit=${LIMIT}" 2>/dev/null | jq length) + echo "DB target count: ${DB_TARGET_COUNT}" + + # db extents + DB_EXTENT_COUNT=$(curl --user "${USERNAME}:${PASSWORD}" "${BASE_URL}/api/v2.0/iscsi/extent?limit=${LIMIT}" 2>/dev/null | jq length) + echo "DB extent count: ${DB_EXTENT_COUNT}" + + # db luns + DB_LUN_COUNT=$(curl --user "${USERNAME}:${PASSWORD}" "${BASE_URL}/api/v2.0/iscsi/targetextent?limit=${LIMIT}" 2>/dev/null | jq length) + echo "DB lun count: ${DB_LUN_COUNT}" + + REGEN=0 + + if [[ ${CTL_TARGET_COUNT} -ne ${DB_TARGET_COUNT} ]]; then + REGEN=1 + fi + + if [[ ${CTL_EXTENT_COUNT} -ne ${DB_EXTENT_COUNT} ]]; then + REGEN=1 + fi + + if [[ ${CTL_LUN_COUNT} -ne ${DB_LUN_COUNT} ]]; then + REGEN=1 + fi + + if [[ ${REGEN} -eq 1 ]]; then + echo "regen ctld config" + midclt call etc.generate ctld &>/dev/null + + echo "reload ctld service" + /etc/rc.d/ctld reload &>/dev/null + fi + +done From d4ab4fa4fe6caa87703d7b6993b8923418731537 Mon Sep 17 00:00:00 2001 From: Travis Glenn Hansen Date: Thu, 3 Dec 2020 16:39:38 -0700 Subject: [PATCH 20/20] updated doc --- README.md | 35 ++++++++++++++++++++++++++++++++++- 1 file changed, 34 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 635b53e..0c0a8da 100644 --- a/README.md +++ b/README.md @@ -40,6 +40,9 @@ You should install/configure the requirements for both nfs and iscsi. Follow the instructions here: https://netapp-trident.readthedocs.io/en/stable-v20.04/kubernetes/operations/tasks/worker.html +Note that `multipath` is supported for the `iscsi`-based drivers. Simply setup +multipath to your liking and set multiple portals in the config as appropriate. + If you are running Kubernetes with rancher/rke please see the following: - https://github.com/rancher/rke/issues/1846 @@ -71,14 +74,43 @@ Server preparation depends slightly on which `driver` you are using. ### FreeNAS (freenas-nfs, freenas-iscsi, freenas-smb) +The recommended version of FreeNAS is 11.3+, however the driver should work +with much older versions as well. 
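+
+If you are not sure exactly which release a given server is on, a quick check
+over ssh can confirm it before continuing (a sketch only - substitute your own
+server address, and note the output format differs slightly between FreeNAS
+and TrueNAS releases):
+
+```bash
+# print the release string, e.g. FreeNAS-11.3-U5 or TrueNAS-12.0-U1
+ssh root@server "cat /etc/version"
+
+# verify the iscsi daemon is up (exits 0 when running)
+ssh root@server "/etc/rc.d/ctld status"
+```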
+
 Ensure the following services are configured and running:

 - ssh (if you use a password for authentication make sure it is allowed)
   - ensure `zsh`, `bash`, or `sh` is set as the root shell, `csh` gives false errors due to quoting
 - nfs
 - iscsi
+  - when using the FreeNAS API concurrently the `/etc/ctl.conf` file on the
+    server can become invalid, some sample scripts are provided in the
+    `contrib` directory to clean things up
+    i.e. copy the script to the server directly and run:
+    `./ctld-config-watchdog-db.sh | logger -t ctld-config-watchdog-db.sh &`
+    please read the scripts and set the variables as appropriate for your server.
+  - ensure you have pre-emptively created portal, group, auth
 - smb

+In addition, if you want to use a non-root user for the ssh operations you may
+create a `csi` user and then run `visudo` directly from the console. Make sure
+the line for the `csi` user has `NOPASSWD` added (note this can get reset by
+FreeNAS if you alter the user via the GUI later):
+
+```
+csi ALL=(ALL) NOPASSWD:ALL
+```
+
+Starting with TrueNAS CORE 12 it is also possible to use an `apiKey` instead of
+the `root` password for the http connection.
+
+Issues to review:
+
+- https://jira.ixsystems.com/browse/NAS-108519
+- https://jira.ixsystems.com/browse/NAS-108520
+- https://jira.ixsystems.com/browse/NAS-108521
+- https://jira.ixsystems.com/browse/NAS-108522
+- https://jira.ixsystems.com/browse/NAS-107219
+
 ### ZoL (zfs-generic-nfs, zfs-generic-iscsi)

 Ensure ssh and zfs is installed on the server and that you have installed
@@ -115,9 +147,10 @@ helm upgrade \
 --namespace democratic-csi \
 zfs-nfs democratic-csi/democratic-csi
 ```
+
 ### A note on non standard kubelet paths

-Some distrobutions, such as `minikube` and `microk8s` uses a non-standard kubelet path. 
+Some distributions, such as `minikube` and `microk8s`, use a non-standard kubelet path.
 In such cases it is necessary to provide a new kubelet host path, microk8s example below:

 ```bash