better error messages, force manual iscsi login, new zfs-local-ephemeral-inline driver, update deps

This commit is contained in:
Travis Glenn Hansen 2020-07-08 11:23:25 -06:00
parent 1609f718d3
commit cf150020a0
9 changed files with 1150 additions and 741 deletions

View File

@ -11,7 +11,7 @@ const args = require("yargs")
.option("driver-config-file", {
describe: "provide a path to driver config file",
config: true,
configParser: path => {
configParser: (path) => {
try {
options = JSON.parse(fs.readFileSync(path, "utf-8"));
return true;
@ -23,40 +23,40 @@ const args = require("yargs")
} catch (e) {}
throw new Error("failed parsing config file: " + path);
}
},
})
.demandOption(["driver-config-file"], "driver-config-file is required")
.option("log-level", {
describe: "log level",
choices: ["error", "warn", "info", "verbose", "debug", "silly"]
choices: ["error", "warn", "info", "verbose", "debug", "silly"],
})
.option("csi-version", {
describe: "versin of the csi spec to load",
choices: ["0.2.0", "0.3.0", "1.0.0", "1.1.0", "1.2.0"]
choices: ["0.2.0", "0.3.0", "1.0.0", "1.1.0", "1.2.0"],
})
.demandOption(["csi-version"], "csi-version is required")
.option("csi-name", {
describe: "name to use for driver registration"
describe: "name to use for driver registration",
})
.demandOption(["csi-name"], "csi-name is required")
.option("csi-mode", {
describe: "mode of the controller",
choices: ["controller", "node"],
type: "array",
default: ["controller", "node"]
default: ["controller", "node"],
})
.demandOption(["csi-mode"], "csi-mode is required")
.option("server-address", {
describe: "listen address for the server",
type: "string"
type: "string",
})
.option("server-port", {
describe: "listen port for the server",
type: "number"
type: "number",
})
.option("server-socket", {
describe: "listen socket for the server",
type: "string"
type: "string",
})
.version()
.help().argv;
@ -87,7 +87,7 @@ const packageDefinition = protoLoader.loadSync(PROTO_PATH, {
longs: String,
enums: String,
defaults: true,
oneofs: true
oneofs: true,
});
const protoDescriptor = grpc.loadPackageDefinition(packageDefinition);
@ -97,7 +97,10 @@ logger.info("initializing csi driver: %s", options.driver);
let driver;
try {
driver = require("../src/driver/factory").factory({ logger, args, cache, package }, options);
driver = require("../src/driver/factory").factory(
{ logger, args, cache, package },
options
);
} catch (err) {
logger.error(err.toString());
process.exit(1);
@ -127,20 +130,26 @@ async function requestHandlerProxy(call, callback, serviceMethodName) {
);
callback(null, response);
} catch (e) {
let message;
if (e instanceof Error) {
message = e.toString();
} else {
message = JSON.stringify(e);
}
logger.error(
"handler error - driver: %s method: %s error: %s",
driver.constructor.name,
serviceMethodName,
JSON.stringify(e)
message
);
if (e.name == "GrpcError") {
callback(e);
} else {
// TODO: only show real error string in development mode
const message = true
? e.toString()
: "unknown error, please inspect service logs";
message = true ? message : "unknown error, please inspect service logs";
callback({ code: grpc.status.INTERNAL, message });
}
}
@ -159,7 +168,7 @@ function getServer() {
},
async Probe(call, callback) {
requestHandlerProxy(call, callback, arguments.callee.name);
}
},
});
// Controller Service
@ -200,7 +209,7 @@ function getServer() {
},
async ControllerExpandVolume(call, callback) {
requestHandlerProxy(call, callback, arguments.callee.name);
}
},
});
}
@ -230,7 +239,7 @@ function getServer() {
},
async NodeGetInfo(call, callback) {
requestHandlerProxy(call, callback, arguments.callee.name);
}
},
});
}
@ -274,8 +283,8 @@ if (bindSocket) {
csiServer.start();
[`SIGINT`, `SIGUSR1`, `SIGUSR2`, `uncaughtException`, `SIGTERM`].forEach(
eventType => {
process.on(eventType, code => {
(eventType) => {
process.on(eventType, (code) => {
console.log(`running server shutdown, exit code: ${code}`);
let socketPath = args.serverSocket || "";
socketPath = socketPath.replace(/^unix:\/\//g, "");

View File

@ -0,0 +1,16 @@
driver: zfs-local-ephemeral-inline
service:
identity: {}
controller: {}
node: {}
zfs:
#chroot: "/host"
datasetParentName: tank/k8s/inline
properties:
# add any arbitrary properties you want here
#refquota:
# value: 10M
# allowOverride: false # default is to allow inline settings to override
#refreservation:
# value: 5M
# ...

1027
package-lock.json generated

File diff suppressed because it is too large Load Diff

View File

@ -18,17 +18,17 @@
"url": "https://github.com/democratic-csi/democratic-csi.git"
},
"dependencies": {
"@grpc/proto-loader": "^0.5.3",
"bunyan": "^1.8.12",
"eslint": "^6.6.0",
"@grpc/proto-loader": "^0.5.4",
"bunyan": "^1.8.14",
"eslint": "^7.4.0",
"grpc-uds": "^0.1.4",
"js-yaml": "^3.13.1",
"js-yaml": "^3.14.0",
"lru-cache": "^5.1.1",
"request": "^2.88.0",
"ssh2": "^0.8.6",
"request": "^2.88.2",
"ssh2": "^0.8.9",
"uri-js": "^4.2.2",
"uuid": "^3.3.3",
"winston": "^3.2.1",
"yargs": "^15.0.2"
"uuid": "^8.2.0",
"winston": "^3.3.3",
"yargs": "^15.4.0"
}
}

View File

@ -3,6 +3,7 @@ const SshClient = require("../../utils/ssh").SshClient;
const { GrpcError, grpc } = require("../../utils/grpc");
const { Zetabyte, ZfsSshProcessManager } = require("../../utils/zfs");
const uuidv4 = require("uuid").v4;
// zfs common properties
const MANAGED_PROPERTY_NAME = "democratic-csi:managed_resource";
@ -56,7 +57,7 @@ class ControllerZfsSshBaseDriver extends CsiBaseDriver {
options.service.identity.capabilities.service = [
//"UNKNOWN",
"CONTROLLER_SERVICE"
"CONTROLLER_SERVICE",
//"VOLUME_ACCESSIBILITY_CONSTRAINTS"
];
}
@ -66,7 +67,7 @@ class ControllerZfsSshBaseDriver extends CsiBaseDriver {
options.service.identity.capabilities.volume_expansion = [
//"UNKNOWN",
"ONLINE"
"ONLINE",
//"OFFLINE"
];
}
@ -84,7 +85,7 @@ class ControllerZfsSshBaseDriver extends CsiBaseDriver {
"LIST_SNAPSHOTS",
"CLONE_VOLUME",
//"PUBLISH_READONLY",
"EXPAND_VOLUME"
"EXPAND_VOLUME",
];
}
@ -96,7 +97,7 @@ class ControllerZfsSshBaseDriver extends CsiBaseDriver {
options.service.node.capabilities.rpc = [
//"UNKNOWN",
"STAGE_UNSTAGE_VOLUME",
"GET_VOLUME_STATS"
"GET_VOLUME_STATS",
//"EXPAND_VOLUME"
];
break;
@ -105,7 +106,7 @@ class ControllerZfsSshBaseDriver extends CsiBaseDriver {
//"UNKNOWN",
"STAGE_UNSTAGE_VOLUME",
"GET_VOLUME_STATS",
"EXPAND_VOLUME"
"EXPAND_VOLUME",
];
break;
}
@ -115,7 +116,7 @@ class ControllerZfsSshBaseDriver extends CsiBaseDriver {
getSshClient() {
return new SshClient({
logger: this.ctx.logger,
connection: this.options.sshConnection
connection: this.options.sshConnection,
});
}
@ -123,7 +124,7 @@ class ControllerZfsSshBaseDriver extends CsiBaseDriver {
const sshClient = this.getSshClient();
return new Zetabyte({
executor: new ZfsSshProcessManager(sshClient),
idempotent: true
idempotent: true,
});
}
@ -160,7 +161,7 @@ class ControllerZfsSshBaseDriver extends CsiBaseDriver {
let message = null;
//[{"access_mode":{"mode":"SINGLE_NODE_WRITER"},"mount":{"mount_flags":["noatime","_netdev"],"fs_type":"nfs"},"access_type":"mount"}]
const valid = capabilities.every(capability => {
const valid = capabilities.every((capability) => {
switch (driverZfsResourceType) {
case "filesystem":
if (capability.access_type != "mount") {
@ -183,7 +184,7 @@ class ControllerZfsSshBaseDriver extends CsiBaseDriver {
"SINGLE_NODE_READER_ONLY",
"MULTI_NODE_READER_ONLY",
"MULTI_NODE_SINGLE_WRITER",
"MULTI_NODE_MULTI_WRITER"
"MULTI_NODE_MULTI_WRITER",
].includes(capability.access_mode.mode)
) {
message = `invalid access_mode, ${capability.access_mode.mode}`;
@ -210,7 +211,7 @@ class ControllerZfsSshBaseDriver extends CsiBaseDriver {
"SINGLE_NODE_WRITER",
"SINGLE_NODE_READER_ONLY",
"MULTI_NODE_READER_ONLY",
"MULTI_NODE_SINGLE_WRITER"
"MULTI_NODE_SINGLE_WRITER",
].includes(capability.access_mode.mode)
) {
message = `invalid access_mode, ${capability.access_mode.mode}`;
@ -436,12 +437,12 @@ class ControllerZfsSshBaseDriver extends CsiBaseDriver {
// remove snapshots from target
await this.removeSnapshotsFromDatatset(datasetName, {
force: true
force: true,
});
} else {
try {
response = await zb.zfs.clone(fullSnapshotName, datasetName, {
properties: volumeProperties
properties: volumeProperties,
});
} catch (err) {
if (err.toString().includes("dataset does not exist")) {
@ -461,7 +462,7 @@ class ControllerZfsSshBaseDriver extends CsiBaseDriver {
await zb.zfs.destroy(fullSnapshotName, {
recurse: true,
force: true,
defer: true
defer: true,
});
} catch (err) {
if (err.toString().includes("dataset does not exist")) {
@ -543,21 +544,21 @@ class ControllerZfsSshBaseDriver extends CsiBaseDriver {
// remove snapshots from target
await this.removeSnapshotsFromDatatset(datasetName, {
force: true
force: true,
});
// remove snapshot from source
await zb.zfs.destroy(fullSnapshotName, {
recurse: true,
force: true,
defer: true
defer: true,
});
} else {
// create clone
// zfs origin property contains parent info, ie: pool0/k8s/test/PVC-111@clone-test
try {
response = await zb.zfs.clone(fullSnapshotName, datasetName, {
properties: volumeProperties
properties: volumeProperties,
});
} catch (err) {
if (err.toString().includes("dataset does not exist")) {
@ -587,7 +588,7 @@ class ControllerZfsSshBaseDriver extends CsiBaseDriver {
await zb.zfs.create(datasetName, {
parents: true,
properties: volumeProperties,
size: driverZfsResourceType == "volume" ? capacity_bytes : false
size: driverZfsResourceType == "volume" ? capacity_bytes : false,
});
}
@ -632,7 +633,7 @@ class ControllerZfsSshBaseDriver extends CsiBaseDriver {
"compression",
VOLUME_CSI_NAME_PROPERTY_NAME,
VOLUME_CONTENT_SOURCE_TYPE_PROPERTY_NAME,
VOLUME_CONTENT_SOURCE_ID_PROPERTY_NAME
VOLUME_CONTENT_SOURCE_ID_PROPERTY_NAME,
]);
properties = properties[datasetName];
driver.ctx.logger.debug("zfs props data: %j", properties);
@ -641,7 +642,7 @@ class ControllerZfsSshBaseDriver extends CsiBaseDriver {
if (this.options.zfs.datasetPermissionsMode) {
command = sshClient.buildCommand("chmod", [
this.options.zfs.datasetPermissionsMode,
properties.mountpoint.value
properties.mountpoint.value,
]);
driver.ctx.logger.verbose("set permission command: %s", command);
response = await sshClient.exec(command);
@ -660,7 +661,7 @@ class ControllerZfsSshBaseDriver extends CsiBaseDriver {
(this.options.zfs.datasetPermissionsGroup
? this.options.zfs.datasetPermissionsGroup
: ""),
properties.mountpoint.value
properties.mountpoint.value,
]);
driver.ctx.logger.verbose("set ownership command: %s", command);
response = await sshClient.exec(command);
@ -691,7 +692,7 @@ class ControllerZfsSshBaseDriver extends CsiBaseDriver {
volume_context = await this.createShare(call, datasetName);
await zb.zfs.set(datasetName, {
[SHARE_VOLUME_CONTEXT_PROPERTY_NAME]:
"'" + JSON.stringify(volume_context) + "'"
"'" + JSON.stringify(volume_context) + "'",
});
volume_context["provisioner_driver"] = driver.options.driver;
@ -714,8 +715,8 @@ class ControllerZfsSshBaseDriver extends CsiBaseDriver {
? capacity_bytes
: 0,
content_source: volume_content_source,
volume_context
}
volume_context,
},
};
return res;
@ -761,7 +762,7 @@ class ControllerZfsSshBaseDriver extends CsiBaseDriver {
"origin",
"refquota",
"compression",
VOLUME_CSI_NAME_PROPERTY_NAME
VOLUME_CSI_NAME_PROPERTY_NAME,
]);
properties = properties[datasetName];
} catch (err) {
@ -798,7 +799,7 @@ class ControllerZfsSshBaseDriver extends CsiBaseDriver {
await zb.zfs.destroy(properties.origin.value, {
recurse: true,
force: true,
defer: true
defer: true,
});
} catch (err) {
if (err.toString().includes("snapshot has dependent clones")) {
@ -939,7 +940,7 @@ class ControllerZfsSshBaseDriver extends CsiBaseDriver {
return {
capacity_bytes: this.options.zfs.datasetEnableQuotas ? capacity_bytes : 0,
node_expansion_required: driverZfsResourceType == "volume" ? true : false
node_expansion_required: driverZfsResourceType == "volume" ? true : false,
};
}
@ -1017,7 +1018,7 @@ class ControllerZfsSshBaseDriver extends CsiBaseDriver {
}
const data = {
entries: entries,
next_token: next_token
next_token: next_token,
};
return data;
@ -1061,7 +1062,7 @@ class ControllerZfsSshBaseDriver extends CsiBaseDriver {
SHARE_VOLUME_CONTEXT_PROPERTY_NAME,
SUCCESS_PROPERTY_NAME,
VOLUME_CONTEXT_PROVISIONER_INSTANCE_ID_PROPERTY_NAME,
VOLUME_CONTEXT_PROVISIONER_DRIVER_PROPERTY_NAME
VOLUME_CONTEXT_PROVISIONER_DRIVER_PROPERTY_NAME,
],
{ types, recurse: true }
);
@ -1069,7 +1070,7 @@ class ControllerZfsSshBaseDriver extends CsiBaseDriver {
if (err.toString().includes("dataset does not exist")) {
return {
entries: [],
next_token: null
next_token: null,
};
}
@ -1084,7 +1085,7 @@ class ControllerZfsSshBaseDriver extends CsiBaseDriver {
}
entries = [];
response.indexed.forEach(row => {
response.indexed.forEach((row) => {
// ignore rows where csi_name is empty
if (row[MANAGED_PROPERTY_NAME] != "true") {
return;
@ -1142,8 +1143,8 @@ class ControllerZfsSshBaseDriver extends CsiBaseDriver {
? row["refquota"]
: row["volsize"],
content_source: volume_content_source,
volume_context
}
volume_context,
},
});
});
@ -1159,7 +1160,7 @@ class ControllerZfsSshBaseDriver extends CsiBaseDriver {
const data = {
entries: entries,
next_token: next_token
next_token: next_token,
};
return data;
@ -1205,7 +1206,7 @@ class ControllerZfsSshBaseDriver extends CsiBaseDriver {
}
const data = {
entries: entries,
next_token: next_token
next_token: next_token,
};
return data;
@ -1290,7 +1291,7 @@ class ControllerZfsSshBaseDriver extends CsiBaseDriver {
"used",
VOLUME_CSI_NAME_PROPERTY_NAME,
SNAPSHOT_CSI_NAME_PROPERTY_NAME,
MANAGED_PROPERTY_NAME
MANAGED_PROPERTY_NAME,
],
{ types, recurse: true }
);
@ -1314,7 +1315,7 @@ class ControllerZfsSshBaseDriver extends CsiBaseDriver {
throw new GrpcError(grpc.status.FAILED_PRECONDITION, e.toString());
}
response.indexed.forEach(row => {
response.indexed.forEach((row) => {
// skip any snapshots not explicitly created by CO
if (row[MANAGED_PROPERTY_NAME] != "true") {
return;
@ -1371,10 +1372,10 @@ class ControllerZfsSshBaseDriver extends CsiBaseDriver {
//https://github.com/protocolbuffers/protobuf/blob/master/src/google/protobuf/timestamp.proto
creation_time: {
seconds: row["creation"],
nanos: 0
nanos: 0,
},
ready_to_use: true,
},
ready_to_use: true
}
});
});
}
@ -1391,7 +1392,7 @@ class ControllerZfsSshBaseDriver extends CsiBaseDriver {
const data = {
entries: entries,
next_token: next_token
next_token: next_token,
};
return data;
@ -1552,7 +1553,7 @@ class ControllerZfsSshBaseDriver extends CsiBaseDriver {
{
recurse: true,
force: true,
defer: true
defer: true,
}
);
@ -1560,12 +1561,12 @@ class ControllerZfsSshBaseDriver extends CsiBaseDriver {
await zb.zfs.destroy(tmpSnapshotName, {
recurse: true,
force: true,
defer: true
defer: true,
});
} else {
try {
await zb.zfs.snapshot(fullSnapshotName, {
properties: snapshotProperties
properties: snapshotProperties,
});
} catch (err) {
if (err.toString().includes("dataset does not exist")) {
@ -1592,7 +1593,7 @@ class ControllerZfsSshBaseDriver extends CsiBaseDriver {
VOLUME_CSI_NAME_PROPERTY_NAME,
SNAPSHOT_CSI_NAME_PROPERTY_NAME,
SNAPSHOT_CSI_SOURCE_VOLUME_ID_PROPERTY_NAME,
MANAGED_PROPERTY_NAME
MANAGED_PROPERTY_NAME,
],
{ types }
);
@ -1623,10 +1624,10 @@ class ControllerZfsSshBaseDriver extends CsiBaseDriver {
//https://github.com/protocolbuffers/protobuf/blob/master/src/google/protobuf/timestamp.proto
creation_time: {
seconds: properties.creation.value,
nanos: 0
nanos: 0,
},
ready_to_use: true,
},
ready_to_use: true
}
};
}
@ -1673,7 +1674,7 @@ class ControllerZfsSshBaseDriver extends CsiBaseDriver {
await zb.zfs.destroy(fullSnapshotName, {
recurse: true,
force: true,
defer: zb.helpers.isZfsSnapshot(snapshot_id) // only defer when snapshot
defer: zb.helpers.isZfsSnapshot(snapshot_id), // only defer when snapshot
});
} catch (err) {
if (err.toString().includes("snapshot has dependent clones")) {
@ -1720,8 +1721,8 @@ class ControllerZfsSshBaseDriver extends CsiBaseDriver {
confirmed: {
volume_context: call.request.volume_context,
volume_capabilities: call.request.volume_capabilities, // TODO: this is a bit crude, should return *ALL* capabilities, not just what was requested
parameters: call.request.parameters
}
parameters: call.request.parameters,
},
};
}
}

View File

@ -1,14 +1,21 @@
const { FreeNASDriver } = require("./freenas");
const { ControllerZfsGenericDriver } = require("./controller-zfs-generic");
const {
ZfsLocalEphemeralInlineDriver,
} = require("./zfs-local-ephemeral-inline");
function factory(ctx, options) {
switch (options.driver) {
case "freenas-nfs":
case "freenas-iscsi":
case "truenas-nfs":
case "truenas-iscsi":
return new FreeNASDriver(ctx, options);
case "zfs-generic-nfs":
case "zfs-generic-iscsi":
return new ControllerZfsGenericDriver(ctx, options);
case "zfs-local-ephemeral-inline":
return new ZfsLocalEphemeralInlineDriver(ctx, options);
default:
throw new Error("invalid csi driver: " + options.driver);
}

View File

@ -298,7 +298,11 @@ class CsiBaseDriver {
break;
case "iscsi":
// create DB entry
let nodeDB = {};
// https://library.netapp.com/ecmdocs/ECMP1654943/html/GUID-8EC685B4-8CB6-40D8-A8D5-031A3899BCDC.html
// put these options in place to force targets managed by csi to be explicitly attached (in the case of unclean shutdown etc)
let nodeDB = {
"node.startup": "manual"
};
const nodeDBKeyPrefix = "node-db.";
const normalizedSecrets = this.getNormalizedParameters(
call.request.secrets,

View File

@ -0,0 +1,422 @@
const fs = require("fs");
const { CsiBaseDriver } = require("../index");
const { GrpcError, grpc } = require("../../utils/grpc");
const { Filesystem } = require("../../utils/filesystem");
const SshClient = require("../../utils/ssh").SshClient;
const { Zetabyte, ZfsSshProcessManager } = require("../../utils/zfs");
// zfs common properties
const MANAGED_PROPERTY_NAME = "democratic-csi:managed_resource";
const SUCCESS_PROPERTY_NAME = "democratic-csi:provision_success";
const VOLUME_CSI_NAME_PROPERTY_NAME = "democratic-csi:csi_volume_name";
const VOLUME_CONTEXT_PROVISIONER_DRIVER_PROPERTY_NAME =
"democratic-csi:volume_context_provisioner_driver";
const VOLUME_CONTEXT_PROVISIONER_INSTANCE_ID_PROPERTY_NAME =
"democratic-csi:volume_context_provisioner_instance_id";
/**
* https://github.com/kubernetes/enhancements/blob/master/keps/sig-storage/20190122-csi-inline-volumes.md
* https://kubernetes-csi.github.io/docs/ephemeral-local-volumes.html
*
* inline drivers are assumed to be mount only (no block support)
 * purposely there is no native support for size constraints
*
*/
class ZfsLocalEphemeralInlineDriver extends CsiBaseDriver {
  /**
   * Normalizes the supplied options, filling in default CSI capability sets
   * when the config omits them. Only node-level GET_VOLUME_STATS is advertised
   * by default; controller RPCs are intentionally disabled because inline
   * volumes are provisioned directly in NodePublishVolume.
   *
   * @param {*} ctx - driver context ({ logger, args, cache, package })
   * @param {*} options - parsed driver config file contents
   */
  constructor(ctx, options) {
    super(...arguments);
    // defensively create every nested config section so later lookups
    // ("x" in obj) never throw on a sparse config file
    options = options || {};
    options.service = options.service || {};
    options.service.identity = options.service.identity || {};
    options.service.controller = options.service.controller || {};
    options.service.node = options.service.node || {};
    options.service.identity.capabilities =
      options.service.identity.capabilities || {};
    options.service.controller.capabilities =
      options.service.controller.capabilities || {};
    options.service.node.capabilities = options.service.node.capabilities || {};
    if (!("service" in options.service.identity.capabilities)) {
      this.ctx.logger.debug("setting default identity service caps");
      options.service.identity.capabilities.service = [
        "UNKNOWN",
        //"CONTROLLER_SERVICE"
        //"VOLUME_ACCESSIBILITY_CONSTRAINTS"
      ];
    }
    if (!("volume_expansion" in options.service.identity.capabilities)) {
      this.ctx.logger.debug("setting default identity volume_expansion caps");
      options.service.identity.capabilities.volume_expansion = [
        "UNKNOWN",
        //"ONLINE",
        //"OFFLINE"
      ];
    }
    if (!("rpc" in options.service.controller.capabilities)) {
      this.ctx.logger.debug("setting default controller caps");
      // no controller RPCs: creation/deletion happens on the node side
      options.service.controller.capabilities.rpc = [
        //"UNKNOWN",
        //"CREATE_DELETE_VOLUME",
        //"PUBLISH_UNPUBLISH_VOLUME",
        //"LIST_VOLUMES",
        //"GET_CAPACITY",
        //"CREATE_DELETE_SNAPSHOT",
        //"LIST_SNAPSHOTS",
        //"CLONE_VOLUME",
        //"PUBLISH_READONLY",
        //"EXPAND_VOLUME"
      ];
    }
    if (!("rpc" in options.service.node.capabilities)) {
      this.ctx.logger.debug("setting default node caps");
      options.service.node.capabilities.rpc = [
        //"UNKNOWN",
        //"STAGE_UNSTAGE_VOLUME",
        "GET_VOLUME_STATS",
        //"EXPAND_VOLUME",
      ];
    }
  }

  /**
   * Builds an SshClient from the configured sshConnection.
   * Only used when zfs commands are executed on a remote host.
   */
  getSshClient() {
    return new SshClient({
      logger: this.ctx.logger,
      connection: this.options.sshConnection,
    });
  }

  /**
   * Builds the Zetabyte zfs wrapper. When sshConnection is configured the
   * commands run remotely via ZfsSshProcessManager; otherwise executor stays
   * undefined and Zetabyte falls back to its local default executor.
   * chroot supports containerized deployments where the host fs is mounted
   * at a prefix (e.g. "/host" — see example config).
   */
  getZetabyte() {
    let sshClient;
    let executor;
    if (this.options.sshConnection) {
      sshClient = this.getSshClient();
      executor = new ZfsSshProcessManager(sshClient);
    }
    return new Zetabyte({
      executor,
      idempotent: true,
      chroot: this.options.zfs.chroot,
      paths: {
        zpool: "/usr/sbin/zpool",
        zfs: "/usr/sbin/zfs",
      },
    });
  }

  /**
   * Returns the configured parent dataset name with any trailing slash
   * stripped.
   */
  getDatasetParentName() {
    let datasetParentName = this.options.zfs.datasetParentName;
    datasetParentName = datasetParentName.replace(/\/$/, "");
    return datasetParentName;
  }

  /**
   * Returns the dataset under which volumes are created
   * (<datasetParentName>/v).
   */
  getVolumeParentDatasetName() {
    let datasetParentName = this.getDatasetParentName();
    datasetParentName += "/v";
    // defensive: strip a trailing slash (cannot occur after the append above,
    // but kept for symmetry with getDatasetParentName)
    datasetParentName = datasetParentName.replace(/\/$/, "");
    return datasetParentName;
  }

  /**
   * Validates the requested volume capabilities against what this driver
   * supports for its zfs resource type.
   *
   * NOTE(review): getDriverZfsResourceType() is not defined in this class —
   * presumably inherited/expected from CsiBaseDriver or a subclass; confirm.
   * Also note the switch has no default case, so an unknown resource type
   * makes the callback return undefined (falsy) with message left null.
   *
   * @param {*} capabilities - array of csi volume_capability objects
   * @returns {{valid: boolean, message: string|null}}
   */
  assertCapabilities(capabilities) {
    const driverZfsResourceType = this.getDriverZfsResourceType();
    this.ctx.logger.verbose("validating capabilities: %j", capabilities);
    let message = null;
    //[{"access_mode":{"mode":"SINGLE_NODE_WRITER"},"mount":{"mount_flags":["noatime","_netdev"],"fs_type":"nfs"},"access_type":"mount"}]
    const valid = capabilities.every((capability) => {
      switch (driverZfsResourceType) {
        case "filesystem":
          if (capability.access_type != "mount") {
            message = `invalid access_type ${capability.access_type}`;
            return false;
          }
          // NOTE(review): "nfs" as the only accepted fs_type appears copied
          // from the nfs driver — verify intended for a local inline driver
          if (
            capability.mount.fs_type &&
            !["nfs"].includes(capability.mount.fs_type)
          ) {
            message = `invalid fs_type ${capability.mount.fs_type}`;
            return false;
          }
          if (
            ![
              "UNKNOWN",
              "SINGLE_NODE_WRITER",
              "SINGLE_NODE_READER_ONLY",
              "MULTI_NODE_READER_ONLY",
              "MULTI_NODE_SINGLE_WRITER",
              "MULTI_NODE_MULTI_WRITER",
            ].includes(capability.access_mode.mode)
          ) {
            message = `invalid access_mode, ${capability.access_mode.mode}`;
            return false;
          }
          return true;
        case "volume":
          if (capability.access_type == "mount") {
            if (
              capability.mount.fs_type &&
              !["ext3", "ext4", "ext4dev", "xfs"].includes(
                capability.mount.fs_type
              )
            ) {
              message = `invalid fs_type ${capability.mount.fs_type}`;
              return false;
            }
          }
          if (
            ![
              "UNKNOWN",
              "SINGLE_NODE_WRITER",
              "SINGLE_NODE_READER_ONLY",
              "MULTI_NODE_READER_ONLY",
              "MULTI_NODE_SINGLE_WRITER",
            ].includes(capability.access_mode.mode)
          ) {
            message = `invalid access_mode, ${capability.access_mode.mode}`;
            return false;
          }
          return true;
      }
    });
    return { valid, message };
  }

  /**
   * This should create a dataset with appropriate volume properties, ensuring
   * the mountpoint is the target_path
   *
   * Any volume_context attributes starting with property.<name> will be set as zfs properties
   *
   * @param {*} call
   */
  async NodePublishVolume(call) {
    const driver = this;
    const zb = this.getZetabyte();
    const volume_id = call.request.volume_id;
    const staging_target_path = call.request.staging_target_path || "";
    const target_path = call.request.target_path;
    const capability = call.request.volume_capability;
    const access_type = capability.access_type || "mount";
    const readonly = call.request.readonly;
    const volume_context = call.request.volume_context;
    let datasetParentName = this.getVolumeParentDatasetName();
    let name = volume_id;
    if (!datasetParentName) {
      throw new GrpcError(
        grpc.status.FAILED_PRECONDITION,
        `invalid configuration: missing datasetParentName`
      );
    }
    if (!name) {
      throw new GrpcError(
        grpc.status.INVALID_ARGUMENT,
        `volume_id is required`
      );
    }
    if (!target_path) {
      throw new GrpcError(
        grpc.status.INVALID_ARGUMENT,
        `target_path is required`
      );
    }
    const datasetName = datasetParentName + "/" + name;
    // TODO: support arbitrary values from config
    // TODO: support arbitrary props from volume_context
    let volumeProperties = {};
    // set user-supplied properties
    // these come from volume_context keys starting with property.<foo>
    const base_key = "property.";
    const prefixLength = `${base_key}`.length;
    Object.keys(volume_context).forEach(function (key) {
      if (key.startsWith(base_key)) {
        let normalizedKey = key.slice(prefixLength);
        volumeProperties[normalizedKey] = volume_context[key];
      }
    });
    // set standard properties (managed-resource markers used by the
    // other democratic-csi drivers as well)
    volumeProperties[VOLUME_CSI_NAME_PROPERTY_NAME] = name;
    volumeProperties[MANAGED_PROPERTY_NAME] = "true";
    volumeProperties[VOLUME_CONTEXT_PROVISIONER_DRIVER_PROPERTY_NAME] =
      driver.options.driver;
    if (driver.options.instance_id) {
      volumeProperties[VOLUME_CONTEXT_PROVISIONER_INSTANCE_ID_PROPERTY_NAME] =
        driver.options.instance_id;
    }
    volumeProperties[SUCCESS_PROPERTY_NAME] = "true";
    // NOTE: setting mountpoint will automatically create the full path as necessary so no need for mkdir etc
    volumeProperties["mountpoint"] = target_path;
    // set driver config properties; a config property only overrides a
    // user-supplied value when allowOverride is explicitly false
    if (this.options.zfs.properties) {
      Object.keys(driver.options.zfs.properties).forEach(function (key) {
        const value = driver.options.zfs.properties[key]["value"];
        const allowOverride =
          "allowOverride" in driver.options.zfs.properties[key]
            ? driver.options.zfs.properties[key]["allowOverride"]
            : true;
        if (!allowOverride || !(key in volumeProperties)) {
          volumeProperties[key] = value;
        }
      });
    }
    await zb.zfs.create(datasetName, {
      parents: true,
      properties: volumeProperties,
    });
    return {};
  }

  /**
   * This should destroy the dataset and remove target_path as appropriate
   *
   * @param {*} call
   */
  async NodeUnpublishVolume(call) {
    const zb = this.getZetabyte();
    const filesystem = new Filesystem();
    let result;
    const volume_id = call.request.volume_id;
    const target_path = call.request.target_path;
    let datasetParentName = this.getVolumeParentDatasetName();
    let name = volume_id;
    if (!datasetParentName) {
      throw new GrpcError(
        grpc.status.FAILED_PRECONDITION,
        `invalid configuration: missing datasetParentName`
      );
    }
    if (!name) {
      throw new GrpcError(
        grpc.status.INVALID_ARGUMENT,
        `volume_id is required`
      );
    }
    if (!target_path) {
      throw new GrpcError(
        grpc.status.INVALID_ARGUMENT,
        `target_path is required`
      );
    }
    const datasetName = datasetParentName + "/" + name;
    // NOTE: -f does NOT allow deletes if dependent filesets exist
    // NOTE: -R will recursively delete items + dependent filesets
    // delete dataset (destroying it also unmounts target_path)
    try {
      await zb.zfs.destroy(datasetName, { recurse: true, force: true });
    } catch (err) {
      if (err.toString().includes("filesystem has dependent clones")) {
        throw new GrpcError(
          grpc.status.FAILED_PRECONDITION,
          "filesystem has dependent clones"
        );
      }
      throw err;
    }
    // cleanup publish directory left behind after the dataset is gone
    result = await filesystem.pathExists(target_path);
    if (result) {
      if (fs.lstatSync(target_path).isDirectory()) {
        result = await filesystem.rmdir(target_path);
      } else {
        result = await filesystem.rm([target_path]);
      }
    }
    return {};
  }

  /**
   * TODO: consider volume_capabilities?
   *
   * @param {*} call
   */
  async GetCapacity(call) {
    const driver = this;
    const zb = this.getZetabyte();
    let datasetParentName = this.getVolumeParentDatasetName();
    if (!datasetParentName) {
      throw new GrpcError(
        grpc.status.FAILED_PRECONDITION,
        `invalid configuration: missing datasetParentName`
      );
    }
    // incompatible capabilities simply report zero capacity rather than error
    if (call.request.volume_capabilities) {
      const result = this.assertCapabilities(call.request.volume_capabilities);
      if (result.valid !== true) {
        return { available_capacity: 0 };
      }
    }
    const datasetName = datasetParentName;
    let properties;
    // NOTE(review): property is requested as "avail" but read back below as
    // "available" — confirm zb.zfs.get keys the result by the canonical name
    properties = await zb.zfs.get(datasetName, ["avail"]);
    properties = properties[datasetName];
    return { available_capacity: properties.available.value };
  }

  /**
   *
   * @param {*} call
   */
  async ValidateVolumeCapabilities(call) {
    const driver = this;
    const result = this.assertCapabilities(call.request.volume_capabilities);
    // per the CSI spec, an unsupported capability set returns a message
    // with no confirmed field rather than an error
    if (result.valid !== true) {
      return { message: result.message };
    }
    return {
      confirmed: {
        volume_context: call.request.volume_context,
        volume_capabilities: call.request.volume_capabilities, // TODO: this is a bit crude, should return *ALL* capabilities, not just what was requested
        parameters: call.request.parameters,
      },
    };
  }
}
module.exports.ZfsLocalEphemeralInlineDriver = ZfsLocalEphemeralInlineDriver;

View File

@ -19,13 +19,17 @@ class Zetabyte {
options.paths.sudo = "/usr/bin/sudo";
}
if (!options.paths.chroot) {
options.paths.chroot = "/usr/sbin/chroot";
}
if (!options.timeout) {
options.timeout = 10 * 60 * 1000;
}
if (!options.executor) {
options.executor = {
spawn: cp.spawn
spawn: cp.spawn,
};
}
@ -36,7 +40,7 @@ class Zetabyte {
"free",
"cap",
"health",
"altroot"
"altroot",
];
zb.DEFAULT_ZFS_LIST_PROPERTIES = [
@ -45,7 +49,7 @@ class Zetabyte {
"avail",
"refer",
"type",
"mountpoint"
"mountpoint",
];
zb.helpers = {
@ -100,7 +104,7 @@ class Zetabyte {
properties[fields[0]][fields[1]] = {
value: fields[2],
received: fields[3],
source: fields[4]
source: fields[4],
};
});
@ -109,7 +113,7 @@ class Zetabyte {
listTableToPropertyList: function (properties, data) {
const entries = [];
data.forEach(row => {
data.forEach((row) => {
let entry = {};
properties.forEach((value, index) => {
entry[value] = row[index];
@ -211,7 +215,7 @@ class Zetabyte {
result = Number(result) + Number(block_size);
return result;
}
},
};
zb.zpool = {
@ -400,7 +404,7 @@ class Zetabyte {
args.push("export");
if (options.force) args.push("-f");
if (Array.isArray(pool)) {
pool.forEach(item => {
pool.forEach((item) => {
args.push(item);
});
} else {
@ -436,7 +440,7 @@ class Zetabyte {
if (options.internal) args.push("-i");
if (options.longFormat) args.push("-l");
if (Array.isArray(pool)) {
pool.forEach(item => {
pool.forEach((item) => {
args.push(item);
});
} else {
@ -535,7 +539,7 @@ class Zetabyte {
if (options.timestamp) args = args.concat(["-T", options.timestamp]);
if (pool) {
if (Array.isArray(pool)) {
pool.forEach(item => {
pool.forEach((item) => {
args.push(item);
});
} else {
@ -560,7 +564,7 @@ class Zetabyte {
return resolve({
properties,
data,
indexed
indexed,
});
}
return resolve({ properties, data: stdout });
@ -743,7 +747,7 @@ class Zetabyte {
if (options.stop) args.push("-s");
if (options.pause) args.push("-p");
if (Array.isArray(pool)) {
pool.forEach(item => {
pool.forEach((item) => {
args.push(item);
});
} else {
@ -811,7 +815,7 @@ class Zetabyte {
if (options.timestamp) args = args.concat(["-T", options.timestamp]);
if (pool) {
if (Array.isArray(pool)) {
pool.forEach(item => {
pool.forEach((item) => {
args.push(item);
});
} else {
@ -863,7 +867,7 @@ class Zetabyte {
if (options.all) args.push("-a");
if (pool) {
if (Array.isArray(pool)) {
pool.forEach(item => {
pool.forEach((item) => {
args.push(item);
});
} else {
@ -881,7 +885,7 @@ class Zetabyte {
}
);
});
}
},
};
zb.zfs = {
@ -1269,7 +1273,7 @@ class Zetabyte {
return resolve({
properties,
data,
indexed
indexed,
});
}
return resolve({ properties, data: stdout });
@ -1344,7 +1348,7 @@ class Zetabyte {
if (options.parse)
args = args.concat([
"-o",
["name", "property", "value", "received", "source"]
["name", "property", "value", "received", "source"],
]);
if (options.fields && !options.parse) {
let fields;
@ -1487,7 +1491,7 @@ class Zetabyte {
}
);
});
}
},
};
}
@ -1518,6 +1522,13 @@ class Zetabyte {
break;
}
if (zb.options.chroot) {
args = args || [];
args.unshift(command);
args.unshift(zb.options.chroot);
command = zb.options.paths.chroot;
}
if (zb.options.sudo) {
args = args || [];
args.unshift(command);
@ -1609,7 +1620,7 @@ class ZfsSshProcessManager {
client.debug("ZfsProcessManager arguments: " + JSON.stringify(arguments));
client.logger.verbose("ZfsProcessManager command: " + command);
client.exec(command, {}, proxy).catch(err => {
client.exec(command, {}, proxy).catch((err) => {
proxy.stderr.emit("data", err.message);
proxy.emit("close", 1, "SIGQUIT");
});