new generic driver for ZFS (supports NFS shares via dataset properties, iSCSI via targetcli)
parent 0e3f97877b
commit 1f8bb9322c
@@ -0,0 +1,60 @@
driver: zfs-generic-iscsi
sshConnection:
  host: server address
  port: 22
  username: root
  # use either password or key
  password: ""
  privateKey: |
    -----BEGIN RSA PRIVATE KEY-----
    ...
    -----END RSA PRIVATE KEY-----
service:
  identity: {}
  controller: {}
  node: {}
zfs:
  datasetParentName: tank/k8s/test
  detachedSnapshotsDatasetParentName: tanks/k8s/test-snapshots

  # "" (inherit), lz4, gzip-9, etc
  zvolCompression:
  # "" (inherit), on, off, verify
  zvolDedup:
  zvolEnableReservation: false
  # 512, 1K, 2K, 4K, 8K, 16K, 64K, 128K; default is 16K
  zvolBlocksize:

iscsi:
  shareStrategy: "targetCli"

  # https://kifarunix.com/how-to-install-and-configure-iscsi-storage-server-on-ubuntu-18-04/
  # https://kifarunix.com/how-install-and-configure-iscsi-storage-server-on-centos-7/
  # https://linuxlasse.net/linux/howtos/ISCSI_and_ZFS_ZVOL
  # http://www.linux-iscsi.org/wiki/ISCSI
  # https://bugzilla.redhat.com/show_bug.cgi?id=1659195
  # http://atodorov.org/blog/2015/04/07/how-to-configure-iscsi-target-on-red-hat-enterprise-linux-7/
  shareStragetyTargetCli:
    basename: "iqn.2003-01.org.linux-iscsi.ubuntu-19.x8664"
    tpg:
      attributes:
        # set to 1 to enable CHAP
        authentication: 0
        # this is required currently as we do not register all node iqns;
        # the effective outcome is that all iqns are allowed to connect
        generate_node_acls: 1
        cache_dynamic_acls: 1
        # if generate_node_acls is 1 then this must be turned off as well (assuming you want write ability)
        demo_mode_write_protect: 0
      auth:
        # CHAP
        #userid: "foo"
        #password: "bar"
        # mutual CHAP
        #mutual_userid: "baz"
        #mutual_password: "bar"
  targetPortal: "server address"
  targetPortals: []
  interface: ""
  namePrefix:
  nameSuffix:
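For orientation (not part of this commit): a minimal sketch of how the driver builds the published IQN from the options above, assuming a hypothetical zvol named tank/k8s/test/pvc-example.

// sketch only; the volume name "pvc-example" is made up
const basename = "iqn.2003-01.org.linux-iscsi.ubuntu-19.x8664"; // iscsi.shareStragetyTargetCli.basename
let iscsiName = "pvc-example"; // leaf name of the provisioned zvol
// namePrefix / nameSuffix (empty in the sample above) would be prepended / appended here
iscsiName = iscsiName.toLowerCase();
const iqn = `${basename}:${iscsiName}`; // "iqn.2003-01.org.linux-iscsi.ubuntu-19.x8664:pvc-example", exposed as lun 0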
@@ -0,0 +1,33 @@
driver: zfs-generic-nfs
sshConnection:
  host: server address
  port: 22
  username: root
  # use either password or key
  password: ""
  privateKey: |
    -----BEGIN RSA PRIVATE KEY-----
    ...
    -----END RSA PRIVATE KEY-----
service:
  identity: {}
  controller: {}
  node: {}
zfs:
  datasetParentName: tank/k8s/test
  detachedSnapshotsDatasetParentName: tanks/k8s/test-snapshots

  datasetEnableQuotas: true
  datasetEnableReservation: false
  datasetPermissionsMode: "0777"
  datasetPermissionsUser: root
  datasetPermissionsGroup: root
nfs:
  # https://docs.oracle.com/cd/E23824_01/html/821-1448/gayne.html
  # https://www.hiroom2.com/2016/05/18/ubuntu-16-04-share-zfs-storage-via-nfs-smb/
  shareStrategy: "setDatasetProperties"
  shareStrategySetDatasetProperties:
    properties:
      sharenfs: "on"
      # share: ""
  shareHost: "server address"
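For orientation (not part of this commit): with the config above, createShare() sets the listed properties on the new dataset (the equivalent of zfs set sharenfs=on on the dataset) and returns a volume context along the lines of this sketch; the dataset name and mountpoint are hypothetical.

// sketch only; dataset and mountpoint names are made up
const volume_context = {
  node_attach_driver: "nfs",
  server: "server address", // nfs.shareHost from the config above
  share: "/tank/k8s/test/pvc-example" // the dataset's zfs mountpoint property
};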
@@ -0,0 +1,263 @@
const grpc = require("grpc");
const { ControllerZfsSshBaseDriver } = require("../controller-zfs-ssh");
const { GrpcError } = require("../../utils/grpc");

class ControllerZfsGenericDriver extends ControllerZfsSshBaseDriver {
  /**
   * cannot make this a storage class parameter as storage class/etc context is *not* sent
   * into various calls such as GetControllerCapabilities etc
   */
  getDriverZfsResourceType() {
    switch (this.options.driver) {
      case "zfs-generic-nfs":
        return "filesystem";
      case "zfs-generic-iscsi":
        return "volume";
      default:
        throw new Error("unknown driver: " + this.ctx.args.driver);
    }
  }

  /**
   * should create any necessary share resources
   * should set the SHARE_VOLUME_CONTEXT_PROPERTY_NAME property
   *
   * @param {*} datasetName
   */
  async createShare(call, datasetName) {
    const zb = this.getZetabyte();
    const sshClient = this.getSshClient();

    let properties;
    let response;
    let share = {};
    let volume_context = {};

    switch (this.options.driver) {
      case "zfs-generic-nfs":
        switch (this.options.nfs.shareStrategy) {
          case "setDatasetProperties":
            for (let key of ["share", "sharenfs"]) {
              if (
                this.options.nfs.shareStrategySetDatasetProperties.properties[
                  key
                ]
              ) {
                await zb.zfs.set(datasetName, {
                  [key]: this.options.nfs.shareStrategySetDatasetProperties
                    .properties[key]
                });
              }
            }

            break;
          default:
            break;
        }

        properties = await zb.zfs.get(datasetName, ["mountpoint"]);
        properties = properties[datasetName];
        this.ctx.logger.debug("zfs props data: %j", properties);

        volume_context = {
          node_attach_driver: "nfs",
          server: this.options.nfs.shareHost,
          share: properties.mountpoint.value
        };
        return volume_context;

      case "zfs-generic-iscsi":
        let basename;
        let iscsiName = zb.helpers.extractLeafName(datasetName);
        if (this.options.iscsi.namePrefix) {
          iscsiName = this.options.iscsi.namePrefix + iscsiName;
        }

        if (this.options.iscsi.nameSuffix) {
          iscsiName += this.options.iscsi.nameSuffix;
        }

        iscsiName = iscsiName.toLowerCase();

        let extentDiskName = "zvol/" + datasetName;

        /**
         * limit is a FreeBSD limitation
         * https://www.ixsystems.com/documentation/freenas/11.2-U5/storage.html#zfs-zvol-config-opts-tab
         */
        //if (extentDiskName.length > 63) {
        //  throw new GrpcError(
        //    grpc.status.FAILED_PRECONDITION,
        //    `extent disk name cannot exceed 63 characters: ${extentDiskName}`
        //  );
        //}

        switch (this.options.iscsi.shareStrategy) {
          case "targetCli":
            basename = this.options.iscsi.shareStragetyTargetCli.basename;
            let setAttributesText = "";
            let setAuthText = "";
            if (this.options.iscsi.shareStragetyTargetCli.tpg) {
              if (this.options.iscsi.shareStragetyTargetCli.tpg.attributes) {
                for (const attributeName in this.options.iscsi
                  .shareStragetyTargetCli.tpg.attributes) {
                  const attributeValue = this.options.iscsi
                    .shareStragetyTargetCli.tpg.attributes[attributeName];
                  setAttributesText += "\n";
                  setAttributesText += `set attribute ${attributeName}=${attributeValue}`;
                }
              }

              if (this.options.iscsi.shareStragetyTargetCli.tpg.auth) {
                for (const attributeName in this.options.iscsi
                  .shareStragetyTargetCli.tpg.auth) {
                  const attributeValue = this.options.iscsi
                    .shareStragetyTargetCli.tpg.auth[attributeName];
                  setAuthText += "\n";
                  setAuthText += `set auth ${attributeName}=${attributeValue}`;
                }
              }
            }

            response = await this.targetCliCommand(
              `
# create target
cd /iscsi
create ${basename}:${iscsiName}

# setup tpg
cd /iscsi/${basename}:${iscsiName}/tpg1
${setAttributesText}
${setAuthText}

# create extent
cd /backstores/block
create ${iscsiName} /dev/${extentDiskName}

# add extent to target/tpg
cd /iscsi/${basename}:${iscsiName}/tpg1/luns
create /backstores/block/${iscsiName}
`
            );
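            // Illustration only (not part of the original commit): with the sample
            // zfs-generic-iscsi config above and a hypothetical zvol
            // tank/k8s/test/pvc-example, the script piped to targetcli renders
            // roughly as:
            //   cd /iscsi
            //   create iqn.2003-01.org.linux-iscsi.ubuntu-19.x8664:pvc-example
            //   cd /iscsi/iqn.2003-01.org.linux-iscsi.ubuntu-19.x8664:pvc-example/tpg1
            //   set attribute authentication=0
            //   set attribute generate_node_acls=1
            //   set attribute cache_dynamic_acls=1
            //   set attribute demo_mode_write_protect=0
            //   cd /backstores/block
            //   create pvc-example /dev/zvol/tank/k8s/test/pvc-example
            //   cd /iscsi/iqn.2003-01.org.linux-iscsi.ubuntu-19.x8664:pvc-example/tpg1/luns
            //   create /backstores/block/pvc-example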
            break;
          default:
            break;
        }

        // iqn = target
        let iqn = basename + ":" + iscsiName;
        this.ctx.logger.info("iqn: " + iqn);

        volume_context = {
          node_attach_driver: "iscsi",
          portal: this.options.iscsi.targetPortal,
          portals: this.options.iscsi.targetPortals.join(","),
          interface: this.options.iscsi.interface,
          iqn: iqn,
          lun: 0
        };
        return volume_context;

      default:
        throw new GrpcError(
          grpc.status.FAILED_PRECONDITION,
          `invalid configuration: unknown driver ${this.options.driver}`
        );
    }
  }

  async deleteShare(call, datasetName) {
    const zb = this.getZetabyte();
    const sshClient = this.getSshClient();

    let response;

    switch (this.options.driver) {
      case "zfs-generic-nfs":
        switch (this.options.nfs.shareStrategy) {
          case "setDatasetProperties":
            break;
          default:
            throw new GrpcError(
              grpc.status.FAILED_PRECONDITION,
              `invalid configuration: unknown shareStrategy ${this.options.nfs.shareStrategy}`
            );
        }
        break;

      case "zfs-generic-iscsi":
        let basename;
        let iscsiName = zb.helpers.extractLeafName(datasetName);
        if (this.options.iscsi.namePrefix) {
          iscsiName = this.options.iscsi.namePrefix + iscsiName;
        }

        if (this.options.iscsi.nameSuffix) {
          iscsiName += this.options.iscsi.nameSuffix;
        }

        iscsiName = iscsiName.toLowerCase();
        switch (this.options.iscsi.shareStrategy) {
          case "targetCli":
            basename = this.options.iscsi.shareStragetyTargetCli.basename;
            response = await this.targetCliCommand(
              `
cd /iscsi
delete ${basename}:${iscsiName}

cd /backstores/block
delete ${iscsiName}
`
            );
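            // Illustration only (not part of the original commit): for the same
            // hypothetical zvol tank/k8s/test/pvc-example this renders roughly as:
            //   cd /iscsi
            //   delete iqn.2003-01.org.linux-iscsi.ubuntu-19.x8664:pvc-example
            //   cd /backstores/block
            //   delete pvc-example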
            break;
          default:
            break;
        }
        break;

      default:
        throw new GrpcError(
          grpc.status.FAILED_PRECONDITION,
          `invalid configuration: unknown driver ${this.options.driver}`
        );
    }

    return {};
  }

  async expandVolume(call, datasetName) {
    switch (this.options.driver) {
      case "zfs-generic-nfs":
        break;

      case "zfs-generic-iscsi":
        switch (this.options.iscsi.shareStrategy) {
          case "targetCli":
            // nothing required, just need to rescan on the node
            break;
          default:
            break;
        }
        break;

      default:
        break;
    }
  }

  async targetCliCommand(data) {
    const sshClient = this.getSshClient();
    data = data.trim();

    let args = ["-c"];
    let command = [];
    command.push(`echo "${data}"`.trim());
    command.push("|");
    command.push("targetcli");

    args.push("'" + command.join(" ") + "'");
    return sshClient.exec(sshClient.buildCommand("sh", args));
  }
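  // Illustration only (not part of the original commit): buildCommand("sh", args)
  // above yields a remote invocation roughly equivalent to
  //   sh -c 'echo "<script>" | targetcli'
  // i.e. the generated script is echoed into a single non-interactive targetcli run.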
}

module.exports.ControllerZfsGenericDriver = ControllerZfsGenericDriver;
@@ -708,10 +708,12 @@ class ControllerZfsSshBaseDriver extends CsiBaseDriver {
      const res = {
        volume: {
          volume_id: name,
          capacity_bytes: capacity_bytes, // kubernetes currently pukes if capacity is returned as 0
          //capacity_bytes: this.options.zfs.datasetEnableQuotas
          //  ? capacity_bytes
          //  : 0,
          //capacity_bytes: capacity_bytes, // kubernetes currently pukes if capacity is returned as 0
          capacity_bytes:
            this.options.zfs.datasetEnableQuotas ||
            driverZfsResourceType == "volume"
              ? capacity_bytes
              : 0,
          content_source: volume_content_source,
          volume_context
        }
@@ -1,10 +1,14 @@
const { FreeNASDriver } = require("./freenas");
const { ControllerZfsGenericDriver } = require("./controller-zfs-generic");

function factory(ctx, options) {
  switch (options.driver) {
    case "freenas-nfs":
    case "freenas-iscsi":
      return new FreeNASDriver(ctx, options);
    case "zfs-generic-nfs":
    case "zfs-generic-iscsi":
      return new ControllerZfsGenericDriver(ctx, options);
    default:
      throw new Error("invalid csi driver: " + options.driver);
  }
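For reference (not part of this commit): a minimal sketch of exercising the factory with the new driver names; ctx and the complete options object are assumed to come from the existing service bootstrap and config loading, not from this diff.

// sketch only; ctx and the full options object are assumptions
const driver = factory(ctx, { driver: "zfs-generic-nfs" /* plus sshConnection, zfs, nfs, ... */ });
// driver is a ControllerZfsGenericDriver; an unknown driver name throws "invalid csi driver: ..."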