initial support for nvmeof

Signed-off-by: Travis Glenn Hansen <travisghansen@yahoo.com>

parent c8b13450d2
commit 9d2943b62d
@@ -256,6 +256,7 @@ jobs:
           - zfs-generic/iscsi.yaml
           - zfs-generic/nfs.yaml
           - zfs-generic/smb.yaml
+          - zfs-generic/nvmeof.yaml
         runs-on:
           - self-hosted
           - Linux
@@ -0,0 +1,30 @@
+driver: zfs-generic-nvmeof
+
+sshConnection:
+  host: ${SERVER_HOST}
+  port: 22
+  username: ${SERVER_USERNAME}
+  password: ${SERVER_PASSWORD}
+
+zfs:
+  datasetParentName: tank/ci/${CI_BUILD_KEY}/v
+  detachedSnapshotsDatasetParentName: tank/ci/${CI_BUILD_KEY}/s
+
+  zvolCompression:
+  zvolDedup:
+  zvolEnableReservation: false
+  zvolBlocksize:
+
+nvmeof:
+  transports:
+    - "tcp://${SERVER_HOST}:4420"
+  namePrefix: "csi-ci-${CI_BUILD_KEY}-"
+  nameSuffix: ""
+  shareStrategy: "nvmetCli"
+  shareStrategyNvmetCli:
+    basename: "nqn.2003-01.org.linux-nvmeof.ubuntu-19.x8664"
+    ports:
+      - "1"
+    subsystem:
+      attributes:
+        allow_any_host: 1
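For orientation: with the sample config above, the nvmetCli share strategy added below renders a script along these lines and pipes it into nvmetcli on the server. The volume name csi-ci-example-pvc-1 and the zvol path are hypothetical placeholders:

# create subsystem
cd /subsystems
create nqn.2003-01.org.linux-nvmeof.ubuntu-19.x8664:csi-ci-example-pvc-1
cd nqn.2003-01.org.linux-nvmeof.ubuntu-19.x8664:csi-ci-example-pvc-1
set attr allow_any_host=1

# create subsystem namespace
cd namespaces
create 1
cd 1
set device path=/dev/zvol/tank/ci/example/v/pvc-1
enable

# associate subsystem/target to port(al)
cd /ports/1/subsystems
create nqn.2003-01.org.linux-nvmeof.ubuntu-19.x8664:csi-ci-example-pvc-1

saveconfig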
File diff suppressed because it is too large
@@ -1,6 +1,6 @@
 {
   "name": "democratic-csi",
-  "version": "1.7.7",
+  "version": "1.8.0",
   "description": "kubernetes csi driver framework",
   "main": "bin/democratic-csi",
   "scripts": {
@@ -20,11 +20,11 @@
   "dependencies": {
     "@grpc/grpc-js": "^1.5.7",
     "@grpc/proto-loader": "^0.7.0",
-    "@kubernetes/client-node": "^0.17.0",
+    "@kubernetes/client-node": "^0.18.0",
     "async-mutex": "^0.4.0",
     "axios": "^1.1.3",
     "bunyan": "^1.8.15",
-    "fs-extra": "^10.1.0",
+    "fs-extra": "^11.1.0",
     "handlebars": "^4.7.7",
     "js-yaml": "^4.0.0",
     "lodash": "^4.17.21",
@@ -3,20 +3,29 @@ const { ControllerZfsBaseDriver } = require("../controller-zfs");
 const { GrpcError, grpc } = require("../../utils/grpc");
 const GeneralUtils = require("../../utils/general");
 const registry = require("../../utils/registry");
-const SshClient = require("../../utils/ssh").SshClient;
+const LocalCliExecClient =
+  require("../../utils/zfs_local_exec_client").LocalCliClient;
+const SshClient = require("../../utils/zfs_ssh_exec_client").SshClient;
 const { Zetabyte, ZfsSshProcessManager } = require("../../utils/zfs");
 
 const Handlebars = require("handlebars");
 
 const ISCSI_ASSETS_NAME_PROPERTY_NAME = "democratic-csi:iscsi_assets_name";
+const NVMEOF_ASSETS_NAME_PROPERTY_NAME = "democratic-csi:nvmeof_assets_name";
 const __REGISTRY_NS__ = "ControllerZfsGenericDriver";
 class ControllerZfsGenericDriver extends ControllerZfsBaseDriver {
   getExecClient() {
     return registry.get(`${__REGISTRY_NS__}:exec_client`, () => {
-      return new SshClient({
-        logger: this.ctx.logger,
-        connection: this.options.sshConnection,
-      });
+      if (this.options.sshConnection) {
+        return new SshClient({
+          logger: this.ctx.logger,
+          connection: this.options.sshConnection,
+        });
+      } else {
+        return new LocalCliExecClient({
+          logger: this.ctx.logger,
+        });
+      }
     });
   }
 
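A practical consequence of the getExecClient() change above: the zfs-generic drivers no longer hard-require SSH. As a rough sketch (dataset names hypothetical), a config that omits the sshConnection block entirely should now execute zfs and share commands on the local host via LocalCliClient:

driver: zfs-generic-nvmeof
# no sshConnection block: commands run locally via LocalCliClient
zfs:
  datasetParentName: tank/local/v
  detachedSnapshotsDatasetParentName: tank/local/s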
@@ -24,7 +33,11 @@ class ControllerZfsGenericDriver extends ControllerZfsBaseDriver {
     return registry.getAsync(`${__REGISTRY_NS__}:zb`, async () => {
       const execClient = this.getExecClient();
       const options = {};
-      options.executor = new ZfsSshProcessManager(execClient);
+      if (this.options.sshConnection) {
+        options.executor = new ZfsSshProcessManager(execClient);
+      } else {
+        options.executor = execClient;
+      }
       options.idempotent = true;
 
       if (
@@ -55,6 +68,7 @@ class ControllerZfsGenericDriver extends ControllerZfsBaseDriver {
       case "zfs-generic-smb":
         return "filesystem";
       case "zfs-generic-iscsi":
+      case "zfs-generic-nvmeof":
         return "volume";
       default:
         throw new Error("unknown driver: " + this.ctx.args.driver);
@@ -164,28 +178,28 @@ class ControllerZfsGenericDriver extends ControllerZfsBaseDriver {
         };
         return volume_context;
 
-      case "zfs-generic-iscsi":
+      case "zfs-generic-iscsi": {
         let basename;
-        let iscsiName;
+        let assetName;
 
         if (this.options.iscsi.nameTemplate) {
-          iscsiName = Handlebars.compile(this.options.iscsi.nameTemplate)({
+          assetName = Handlebars.compile(this.options.iscsi.nameTemplate)({
             name: call.request.name,
             parameters: call.request.parameters,
           });
         } else {
-          iscsiName = zb.helpers.extractLeafName(datasetName);
+          assetName = zb.helpers.extractLeafName(datasetName);
         }
 
         if (this.options.iscsi.namePrefix) {
-          iscsiName = this.options.iscsi.namePrefix + iscsiName;
+          assetName = this.options.iscsi.namePrefix + assetName;
         }
 
         if (this.options.iscsi.nameSuffix) {
-          iscsiName += this.options.iscsi.nameSuffix;
+          assetName += this.options.iscsi.nameSuffix;
         }
 
-        iscsiName = iscsiName.toLowerCase();
+        assetName = assetName.toLowerCase();
 
         let extentDiskName = "zvol/" + datasetName;
 
@@ -239,20 +253,20 @@
                   `
 # create target
 cd /iscsi
-create ${basename}:${iscsiName}
+create ${basename}:${assetName}
 
 # setup tpg
-cd /iscsi/${basename}:${iscsiName}/tpg1
+cd /iscsi/${basename}:${assetName}/tpg1
 ${setAttributesText}
 ${setAuthText}
 
 # create extent
 cd /backstores/block
-create ${iscsiName} /dev/${extentDiskName}
+create ${assetName} /dev/${extentDiskName}
 
 # add extent to target/tpg
-cd /iscsi/${basename}:${iscsiName}/tpg1/luns
-create /backstores/block/${iscsiName}
+cd /iscsi/${basename}:${assetName}/tpg1/luns
+create /backstores/block/${assetName}
 `
                 );
               },
@@ -271,12 +285,12 @@ create /backstores/block/${iscsiName}
         }
 
         // iqn = target
-        let iqn = basename + ":" + iscsiName;
+        let iqn = basename + ":" + assetName;
         this.ctx.logger.info("iqn: " + iqn);
 
         // store this off to make delete process more bullet proof
         await zb.zfs.set(datasetName, {
-          [ISCSI_ASSETS_NAME_PROPERTY_NAME]: iscsiName,
+          [ISCSI_ASSETS_NAME_PROPERTY_NAME]: assetName,
         });
 
         volume_context = {
@@ -290,6 +304,231 @@ create /backstores/block/${iscsiName}
           lun: 0,
         };
         return volume_context;
+      }
+
+      case "zfs-generic-nvmeof": {
+        let basename;
+        let assetName;
+
+        if (this.options.nvmeof.nameTemplate) {
+          assetName = Handlebars.compile(this.options.nvmeof.nameTemplate)({
+            name: call.request.name,
+            parameters: call.request.parameters,
+          });
+        } else {
+          assetName = zb.helpers.extractLeafName(datasetName);
+        }
+
+        if (this.options.nvmeof.namePrefix) {
+          assetName = this.options.nvmeof.namePrefix + assetName;
+        }
+
+        if (this.options.nvmeof.nameSuffix) {
+          assetName += this.options.nvmeof.nameSuffix;
+        }
+
+        assetName = assetName.toLowerCase();
+
+        let extentDiskName = "zvol/" + datasetName;
+
+        /**
+         * limit is a FreeBSD limitation
+         * https://www.ixsystems.com/documentation/freenas/11.2-U5/storage.html#zfs-zvol-config-opts-tab
+         */
+        //if (extentDiskName.length > 63) {
+        //  throw new GrpcError(
+        //    grpc.status.FAILED_PRECONDITION,
+        //    `extent disk name cannot exceed 63 characters: ${extentDiskName}`
+        //  );
+        //}
+
+        let namespace = 1;
+
+        switch (this.options.nvmeof.shareStrategy) {
+          case "nvmetCli":
+            {
+              basename = this.options.nvmeof.shareStrategyNvmetCli.basename;
+              let savefile = _.get(
+                this.options,
+                "nvmeof.shareStrategyNvmetCli.configPath",
+                ""
+              );
+              if (savefile) {
+                savefile = `savefile=${savefile}`;
+              }
+              let setSubsystemAttributesText = "";
+              if (this.options.nvmeof.shareStrategyNvmetCli.subsystem) {
+                if (
+                  this.options.nvmeof.shareStrategyNvmetCli.subsystem.attributes
+                ) {
+                  for (const attributeName in this.options.nvmeof
+                    .shareStrategyNvmetCli.subsystem.attributes) {
+                    const attributeValue =
+                      this.options.nvmeof.shareStrategyNvmetCli.subsystem
+                        .attributes[attributeName];
+                    setSubsystemAttributesText += "\n";
+                    setSubsystemAttributesText += `set attr ${attributeName}=${attributeValue}`;
+                  }
+                }
+              }
+
+              let portCommands = "";
+              this.options.nvmeof.shareStrategyNvmetCli.ports.forEach(
+                (port) => {
+                  portCommands += `
+cd /ports/${port}/subsystems
+create ${basename}:${assetName}
+`;
+                }
+              );
+
+              await GeneralUtils.retry(
+                3,
+                2000,
+                async () => {
+                  await this.nvmetCliCommand(
+                    `
+# create subsystem
+cd /subsystems
+create ${basename}:${assetName}
+cd ${basename}:${assetName}
+${setSubsystemAttributesText}
+
+# create subsystem namespace
+cd namespaces
+create ${namespace}
+cd ${namespace}
+set device path=/dev/${extentDiskName}
+enable
+
+# associate subsystem/target to port(al)
+${portCommands}
+
+saveconfig ${savefile}
+`
+                  );
+                },
+                {
+                  retryCondition: (err) => {
+                    if (err.stdout && err.stdout.includes("Ran out of input")) {
+                      return true;
+                    }
+                    return false;
+                  },
+                }
+              );
+            }
+            break;
+
+          case "spdkCli":
+            {
+              basename = this.options.nvmeof.shareStrategySpdkCli.basename;
+              let bdevAttributesText = "";
+              if (this.options.nvmeof.shareStrategySpdkCli.bdev) {
+                if (this.options.nvmeof.shareStrategySpdkCli.bdev.attributes) {
+                  for (const attributeName in this.options.nvmeof
+                    .shareStrategySpdkCli.bdev.attributes) {
+                    const attributeValue =
+                      this.options.nvmeof.shareStrategySpdkCli.bdev.attributes[
+                        attributeName
+                      ];
+                    bdevAttributesText += `${attributeName}=${attributeValue}`;
+                  }
+                }
+              }
+
+              let subsystemAttributesText = "";
+              if (this.options.nvmeof.shareStrategySpdkCli.subsystem) {
+                if (
+                  this.options.nvmeof.shareStrategySpdkCli.subsystem.attributes
+                ) {
+                  for (const attributeName in this.options.nvmeof
+                    .shareStrategySpdkCli.subsystem.attributes) {
+                    const attributeValue =
+                      this.options.nvmeof.shareStrategySpdkCli.subsystem
+                        .attributes[attributeName];
+                    subsystemAttributesText += `${attributeName}=${attributeValue}`;
+                  }
+                }
+              }
+
+              let listenerCommands = `cd /nvmf/subsystem/${basename}:${assetName}/listen_addresses\n`;
+              this.options.nvmeof.shareStrategySpdkCli.listeners.forEach(
+                (listener) => {
+                  let listenerAttributesText = "";
+                  for (const attributeName in listener) {
+                    const attributeValue = listener[attributeName];
+                    listenerAttributesText += ` ${attributeName}=${attributeValue} `;
+                  }
+                  listenerCommands += `
+create ${listenerAttributesText}
+`;
+                }
+              );
+
+              await GeneralUtils.retry(
+                3,
+                2000,
+                async () => {
+                  await this.spdkCliCommand(
+                    `
+# create bdev
+cd /bdevs/${this.options.nvmeof.shareStrategySpdkCli.bdev.type}
+create filename=/dev/${extentDiskName} name=${basename}:${assetName} ${bdevAttributesText}
+
+# create subsystem
+cd /nvmf/subsystem
+create nqn=${basename}:${assetName} ${subsystemAttributesText}
+cd ${basename}:${assetName}
+
+# create namespace
+cd /nvmf/subsystem/${basename}:${assetName}/namespaces
+create bdev_name=${basename}:${assetName} nsid=${namespace}
+
+# add listener
+${listenerCommands}
+
+cd /
+save_config filename=${this.options.nvmeof.shareStrategySpdkCli.configPath}
+`
+                  );
+                },
+                {
+                  retryCondition: (err) => {
+                    if (err.stdout && err.stdout.includes("Ran out of input")) {
+                      return true;
+                    }
+                    return false;
+                  },
+                }
+              );
+            }
+            break;
+
+          default:
+            break;
+        }
+
+        // nqn = target
+        let nqn = basename + ":" + assetName;
+        this.ctx.logger.info("nqn: " + nqn);
+
+        // store this off to make delete process more bullet proof
+        await zb.zfs.set(datasetName, {
+          [NVMEOF_ASSETS_NAME_PROPERTY_NAME]: assetName,
+        });
+
+        volume_context = {
+          node_attach_driver: "nvmeof",
+          transport: this.options.nvmeof.transport || "",
+          transports: this.options.nvmeof.transports
+            ? this.options.nvmeof.transports.join(",")
+            : "",
+          nqn,
+          nsid: namespace,
+        };
+        return volume_context;
+      }
 
       default:
         throw new GrpcError(
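Given the CI config above (a single tcp transport and the nvmetCli strategy), the volume_context returned for a hypothetical volume would look roughly like:

{
  node_attach_driver: "nvmeof",
  transport: "",
  transports: "tcp://server.example:4420",
  nqn: "nqn.2003-01.org.linux-nvmeof.ubuntu-19.x8664:csi-ci-example-pvc-1",
  nsid: 1,
}

The node-side code added later in this commit splits transports on commas and deduplicates, so transport and transports may be used together.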
@@ -367,9 +606,9 @@ create /backstores/block/${iscsiName}
           }
           break;
 
-      case "zfs-generic-iscsi":
+      case "zfs-generic-iscsi": {
         let basename;
-        let iscsiName;
+        let assetName;
 
         // Delete iscsi assets
         try {
@@ -386,23 +625,23 @@ create /backstores/block/${iscsiName}
         properties = properties[datasetName];
         this.ctx.logger.debug("zfs props data: %j", properties);
 
-        iscsiName = properties[ISCSI_ASSETS_NAME_PROPERTY_NAME].value;
+        assetName = properties[ISCSI_ASSETS_NAME_PROPERTY_NAME].value;
 
-        if (zb.helpers.isPropertyValueSet(iscsiName)) {
+        if (zb.helpers.isPropertyValueSet(assetName)) {
           //do nothing
         } else {
-          iscsiName = zb.helpers.extractLeafName(datasetName);
+          assetName = zb.helpers.extractLeafName(datasetName);
 
           if (this.options.iscsi.namePrefix) {
-            iscsiName = this.options.iscsi.namePrefix + iscsiName;
+            assetName = this.options.iscsi.namePrefix + assetName;
           }
 
           if (this.options.iscsi.nameSuffix) {
-            iscsiName += this.options.iscsi.nameSuffix;
+            assetName += this.options.iscsi.nameSuffix;
           }
         }
 
-        iscsiName = iscsiName.toLowerCase();
+        assetName = assetName.toLowerCase();
         switch (this.options.iscsi.shareStrategy) {
           case "targetCli":
             basename = this.options.iscsi.shareStrategyTargetCli.basename;
@@ -414,11 +653,11 @@ create /backstores/block/${iscsiName}
                   `
 # delete target
 cd /iscsi
-delete ${basename}:${iscsiName}
+delete ${basename}:${assetName}
 
 # delete extent
 cd /backstores/block
-delete ${iscsiName}
+delete ${assetName}
 `
                 );
               },
@@ -437,6 +676,132 @@ delete ${iscsiName}
             break;
         }
         break;
+      }
+
+      case "zfs-generic-nvmeof": {
+        let basename;
+        let assetName;
+
+        // Delete nvmeof assets
+        try {
+          properties = await zb.zfs.get(datasetName, [
+            NVMEOF_ASSETS_NAME_PROPERTY_NAME,
+          ]);
+        } catch (err) {
+          if (err.toString().includes("dataset does not exist")) {
+            return;
+          }
+          throw err;
+        }
+
+        properties = properties[datasetName];
+        this.ctx.logger.debug("zfs props data: %j", properties);
+
+        assetName = properties[NVMEOF_ASSETS_NAME_PROPERTY_NAME].value;
+
+        if (zb.helpers.isPropertyValueSet(assetName)) {
+          //do nothing
+        } else {
+          assetName = zb.helpers.extractLeafName(datasetName);
+
+          if (this.options.nvmeof.namePrefix) {
+            assetName = this.options.nvmeof.namePrefix + assetName;
+          }
+
+          if (this.options.nvmeof.nameSuffix) {
+            assetName += this.options.nvmeof.nameSuffix;
+          }
+        }
+
+        assetName = assetName.toLowerCase();
+        switch (this.options.nvmeof.shareStrategy) {
+          case "nvmetCli":
+            {
+              basename = this.options.nvmeof.shareStrategyNvmetCli.basename;
+              let savefile = _.get(
+                this.options,
+                "nvmeof.shareStrategyNvmetCli.configPath",
+                ""
+              );
+              if (savefile) {
+                savefile = `savefile=${savefile}`;
+              }
+              let portCommands = "";
+              this.options.nvmeof.shareStrategyNvmetCli.ports.forEach(
+                (port) => {
+                  portCommands += `
+cd /ports/${port}/subsystems
+delete ${basename}:${assetName}
+`;
+                }
+              );
+              await GeneralUtils.retry(
+                3,
+                2000,
+                async () => {
+                  await this.nvmetCliCommand(
+                    `
+# delete subsystem from port
+${portCommands}
+
+# delete subsystem
+cd /subsystems
+delete ${basename}:${assetName}
+
+saveconfig ${savefile}
+`
+                  );
+                },
+                {
+                  retryCondition: (err) => {
+                    if (err.stdout && err.stdout.includes("Ran out of input")) {
+                      return true;
+                    }
+                    return false;
+                  },
+                }
+              );
+            }
+            break;
+          case "spdkCli":
+            {
+              basename = this.options.nvmeof.shareStrategySpdkCli.basename;
+              await GeneralUtils.retry(
+                3,
+                2000,
+                async () => {
+                  await this.spdkCliCommand(
+                    `
+# delete subsystem
+cd /nvmf/subsystem/
+delete subsystem_nqn=${basename}:${assetName}
+
+# delete bdev
+cd /bdevs/${this.options.nvmeof.shareStrategySpdkCli.bdev.type}
+delete name=${basename}:${assetName}
+
+cd /
+save_config filename=${this.options.nvmeof.shareStrategySpdkCli.configPath}
+`
+                  );
+                },
+                {
+                  retryCondition: (err) => {
+                    if (err.stdout && err.stdout.includes("Ran out of input")) {
+                      return true;
+                    }
+                    return false;
+                  },
+                }
+              );
+            }
+            break;
+
+          default:
+            break;
+        }
+        break;
+      }
 
       default:
         throw new GrpcError(
@@ -477,18 +842,18 @@ delete ${iscsiName}
     let command = "sh";
     let args = ["-c"];
 
-    let targetCliArgs = ["targetcli"];
+    let cliArgs = ["targetcli"];
     if (
       _.get(this.options, "iscsi.shareStrategyTargetCli.sudoEnabled", false)
     ) {
-      targetCliArgs.unshift("sudo");
+      cliArgs.unshift("sudo");
     }
 
-    let targetCliCommand = [];
-    targetCliCommand.push(`echo "${data}"`.trim());
-    targetCliCommand.push("|");
-    targetCliCommand.push(targetCliArgs.join(" "));
-    args.push("'" + targetCliCommand.join(" ") + "'");
+    let cliCommand = [];
+    cliCommand.push(`echo "${data}"`.trim());
+    cliCommand.push("|");
+    cliCommand.push(cliArgs.join(" "));
+    args.push("'" + cliCommand.join(" ") + "'");
 
     let logCommandTmp = command + " " + args.join(" ");
     let logCommand = "";
@@ -527,6 +892,130 @@ delete ${iscsiName}
     }
     return response;
   }
+
+  async nvmetCliCommand(data) {
+    const execClient = this.getExecClient();
+    const driver = this;
+
+    data = data.trim();
+
+    let command = "sh";
+    let args = ["-c"];
+
+    let cliArgs = [
+      _.get(
+        this.options,
+        "nvmeof.shareStrategyNvmetCli.nvmetcliPath",
+        "nvmetcli"
+      ),
+    ];
+    if (
+      _.get(this.options, "nvmeof.shareStrategyNvmetCli.sudoEnabled", false)
+    ) {
+      cliArgs.unshift("sudo");
+    }
+
+    let cliCommand = [];
+    cliCommand.push(`echo "${data}"`.trim());
+    cliCommand.push("|");
+    cliCommand.push(cliArgs.join(" "));
+    args.push("'" + cliCommand.join(" ") + "'");
+
+    let logCommandTmp = command + " " + args.join(" ");
+    let logCommand = "";
+
+    logCommandTmp.split("\n").forEach((line) => {
+      if (line.startsWith("set auth password=")) {
+        logCommand += "set auth password=<redacted>";
+      } else if (line.startsWith("set auth mutual_password=")) {
+        logCommand += "set auth mutual_password=<redacted>";
+      } else {
+        logCommand += line;
+      }
+
+      logCommand += "\n";
+    });
+
+    driver.ctx.logger.verbose("nvmetCLI command: " + logCommand);
+    //process.exit(0);
+
+    // https://github.com/democratic-csi/democratic-csi/issues/127
+    // https://bugs.launchpad.net/ubuntu/+source/python-configshell-fb/+bug/1776761
+    // can apply the linked patch with some modifications to overcome the
+    // KeyErrors or we can simply start a fake tty which does not seem to have
+    // a detrimental effect, only affects Ubuntu 18.04 and older
+    let options = {
+      pty: true,
+    };
+    let response = await execClient.exec(
+      execClient.buildCommand(command, args),
+      options
+    );
+    driver.ctx.logger.verbose("nvmetCLI response: " + JSON.stringify(response));
+    if (response.code != 0) {
+      throw response;
+    }
+    return response;
+  }
+
+  async spdkCliCommand(data) {
+    const execClient = this.getExecClient();
+    const driver = this;
+
+    data = data.trim();
+
+    let command = "sh";
+    let args = ["-c"];
+
+    let cliArgs = [
+      _.get(this.options, "nvmeof.shareStrategySpdkCli.spdkcliPath", "spdkcli"),
+    ];
+    if (_.get(this.options, "nvmeof.shareStrategySpdkCli.sudoEnabled", false)) {
+      cliArgs.unshift("sudo");
+    }
+
+    let cliCommand = [];
+    cliCommand.push(`echo "${data}"`.trim());
+    cliCommand.push("|");
+    cliCommand.push(cliArgs.join(" "));
+    args.push("'" + cliCommand.join(" ") + "'");
+
+    let logCommandTmp = command + " " + args.join(" ");
+    let logCommand = "";
+
+    logCommandTmp.split("\n").forEach((line) => {
+      if (line.startsWith("set auth password=")) {
+        logCommand += "set auth password=<redacted>";
+      } else if (line.startsWith("set auth mutual_password=")) {
+        logCommand += "set auth mutual_password=<redacted>";
+      } else {
+        logCommand += line;
+      }
+
+      logCommand += "\n";
+    });
+
+    driver.ctx.logger.verbose("spdkCLI command: " + logCommand);
+    //process.exit(0);
+
+    // https://github.com/democratic-csi/democratic-csi/issues/127
+    // https://bugs.launchpad.net/ubuntu/+source/python-configshell-fb/+bug/1776761
+    // can apply the linked patch with some modifications to overcome the
+    // KeyErrors or we can simply start a fake tty which does not seem to have
+    // a detrimental effect, only affects Ubuntu 18.04 and older
+    let options = {
+      pty: true,
+    };
+    let response = await execClient.exec(
+      execClient.buildCommand(command, args),
+      options
+    );
+    driver.ctx.logger.verbose("spdkCLI response: " + JSON.stringify(response));
+    if (response.code != 0) {
+      throw response;
+    }
+    return response;
+  }
 }
 
 module.exports.ControllerZfsGenericDriver = ControllerZfsGenericDriver;
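Both new helpers build the same pipeline shape as the existing targetCliCommand: the generated script is echoed into the CLI through a shell, roughly sh -c 'echo "<script>" | sudo nvmetcli' (sudo and the nvmetcli/spdkcli paths come from the config), and the command runs with pty: true to sidestep the configshell KeyError issue linked in the comments above.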
@@ -2,7 +2,8 @@ const _ = require("lodash");
 const { ControllerZfsBaseDriver } = require("../controller-zfs");
 const { GrpcError, grpc } = require("../../utils/grpc");
 const GeneralUtils = require("../../utils/general");
-const LocalCliExecClient = require("./exec").LocalCliClient;
+const LocalCliExecClient =
+  require("../../utils/zfs_local_exec_client").LocalCliClient;
 const registry = require("../../utils/registry");
 const { Zetabyte } = require("../../utils/zfs");
 
@@ -35,6 +35,7 @@ function factory(ctx, options) {
     case "zfs-generic-nfs":
     case "zfs-generic-smb":
     case "zfs-generic-iscsi":
+    case "zfs-generic-nvmeof":
       return new ControllerZfsGenericDriver(ctx, options);
     case "zfs-local-dataset":
    case "zfs-local-zvol":
@@ -2,7 +2,7 @@ const _ = require("lodash");
 const { ControllerZfsBaseDriver } = require("../controller-zfs");
 const { GrpcError, grpc } = require("../../utils/grpc");
 const registry = require("../../utils/registry");
-const SshClient = require("../../utils/ssh").SshClient;
+const SshClient = require("../../utils/zfs_ssh_exec_client").SshClient;
 const HttpClient = require("./http").Client;
 const TrueNASApiClient = require("./http/api").Api;
 const { Zetabyte, ZfsSshProcessManager } = require("../../utils/zfs");
@@ -9,6 +9,7 @@ const { Mount } = require("../utils/mount");
 const { OneClient } = require("../utils/oneclient");
 const { Filesystem } = require("../utils/filesystem");
 const { ISCSI } = require("../utils/iscsi");
+const { NVMEoF } = require("../utils/nvmeof");
 const registry = require("../utils/registry");
 const semver = require("semver");
 const GeneralUtils = require("../utils/general");
@@ -139,6 +140,17 @@ class CsiBaseDriver {
     });
   }
 
+  /**
+   * Get an instance of the NVMEoF class
+   *
+   * @returns NVMEoF
+   */
+  getDefaultNVMEoFInstance() {
+    return registry.get(`${__REGISTRY_NS__}:default_nvmeof_instance`, () => {
+      return new NVMEoF();
+    });
+  }
+
   getDefaultZetabyteInstance() {
     return registry.get(`${__REGISTRY_NS__}:default_zb_instance`, () => {
       return new Zetabyte({
@@ -560,6 +572,7 @@ class CsiBaseDriver {
       const mount = driver.getDefaultMountInstance();
       const filesystem = driver.getDefaultFilesystemInstance();
       const iscsi = driver.getDefaultISCSIInstance();
+      const nvmeof = driver.getDefaultNVMEoFInstance();
       let result;
       let device;
       let block_device_info;
@@ -792,7 +805,11 @@ class CsiBaseDriver {
             await iscsi.iscsiadm.rescanSession(session);
 
             // find device name
-            device = iscsi.devicePathByPortalIQNLUN(iscsiConnection.portal, iscsiConnection.iqn, iscsiConnection.lun)
+            device = iscsi.devicePathByPortalIQNLUN(
+              iscsiConnection.portal,
+              iscsiConnection.iqn,
+              iscsiConnection.lun
+            );
             let deviceByPath = device;
 
             // can take some time for device to show up, loop for some period
@@ -887,6 +904,233 @@ class CsiBaseDriver {
           }
 
           break;
 
+        case "nvmeof":
+          {
+            let transports = [];
+            if (volume_context.transport) {
+              transports.push(volume_context.transport.trim());
+            }
+
+            if (volume_context.transports) {
+              volume_context.transports.split(",").forEach((transport) => {
+                transports.push(transport.trim());
+              });
+            }
+
+            // ensure unique entries only
+            transports = [...new Set(transports)];
+
+            // stores actual device paths after nvmeof login
+            let nvmeofControllerDevices = [];
+            let nvmeofNamespaceDevices = [];
+
+            // stores configuration of targets/nqns to connect to
+            let nvmeofConnections = [];
+            for (let transport of transports) {
+              nvmeofConnections.push({
+                transport,
+                nqn: volume_context.nqn,
+                nsid: volume_context.nsid,
+              });
+            }
+
+            for (let nvmeofConnection of nvmeofConnections) {
+              // connect
+              try {
+                await GeneralUtils.retry(15, 2000, async () => {
+                  await nvmeof.connectByNQNTransport(
+                    nvmeofConnection.nqn,
+                    nvmeofConnection.transport
+                  );
+                });
+              } catch (err) {
+                driver.ctx.logger.warn(
+                  `error: ${JSON.stringify(err)} connecting to transport: ${
+                    nvmeofConnection.transport
+                  }`
+                );
+                continue;
+              }
+
+              // find controller device
+              let controllerDevice;
+              try {
+                await GeneralUtils.retry(15, 2000, async () => {
+                  controllerDevice =
+                    await nvmeof.controllerDevicePathByTransportNQN(
+                      nvmeofConnection.transport,
+                      nvmeofConnection.nqn,
+                      nvmeofConnection.nsid
+                    );
+
+                  if (!controllerDevice) {
+                    throw new Error(`failed to find controller device`);
+                  }
+                });
+              } catch (err) {
+                driver.ctx.logger.warn(
+                  `error finding nvme controller device: ${JSON.stringify(
+                    err
+                  )}`
+                );
+                continue;
+              }
+
+              // find namespace device
+              let namespaceDevice;
+              try {
+                await GeneralUtils.retry(15, 2000, async () => {
+                  namespaceDevice =
+                    await nvmeof.namespaceDevicePathByNQNNamespace(
+                      nvmeofConnection.nqn,
+                      nvmeofConnection.nsid
+                    );
+                  if (!namespaceDevice) {
+                    throw new Error(`failed to find namespace device`);
+                  }
+                });
+              } catch (err) {
+                driver.ctx.logger.warn(
+                  `error finding nvme namespace device: ${JSON.stringify(
+                    err
+                  )}`
+                );
+                continue;
+              }
+
+              // sanity check for device files
+              if (!namespaceDevice) {
+                continue;
+              }
+
+              // sanity check for device files
+              if (!controllerDevice) {
+                continue;
+              }
+
+              // rescan in scenarios when login previously occurred but volumes never appeared
+              // must be the NVMe char device, not the namespace device
+              await nvmeof.rescanNamespace(controllerDevice);
+
+              // can take some time for device to show up, loop for some period
+              result = await filesystem.pathExists(namespaceDevice);
+              let timer_start = Math.round(new Date().getTime() / 1000);
+              let timer_max = 30;
+              let deviceCreated = result;
+              while (!result) {
+                await GeneralUtils.sleep(2000);
+                result = await filesystem.pathExists(namespaceDevice);
+
+                if (result) {
+                  deviceCreated = true;
+                  break;
+                }
+
+                let current_time = Math.round(new Date().getTime() / 1000);
+                if (!result && current_time - timer_start > timer_max) {
+                  driver.ctx.logger.warn(
+                    `hit timeout waiting for namespace device node to appear: ${namespaceDevice}`
+                  );
+                  break;
+                }
+              }
+
+              if (deviceCreated) {
+                device = await filesystem.realpath(namespaceDevice);
+                nvmeofControllerDevices.push(controllerDevice);
+                nvmeofNamespaceDevices.push(namespaceDevice);
+
+                driver.ctx.logger.info(
+                  `successfully logged into nvmeof transport ${nvmeofConnection.transport} and created controller device: ${controllerDevice}, namespace device: ${namespaceDevice}`
+                );
+              }
+            }
+
+            // let things settle
+            // this will help in dm scenarios
+            await GeneralUtils.sleep(2000);
+
+            // filter duplicates
+            nvmeofNamespaceDevices = nvmeofNamespaceDevices.filter(
+              (value, index, self) => {
+                return self.indexOf(value) === index;
+              }
+            );
+
+            nvmeofControllerDevices = nvmeofControllerDevices.filter(
+              (value, index, self) => {
+                return self.indexOf(value) === index;
+              }
+            );
+
+            // only throw an error if we were not able to attach to *any* devices
+            if (nvmeofNamespaceDevices.length < 1) {
+              throw new GrpcError(
+                grpc.status.UNKNOWN,
+                `unable to attach any nvme devices`
+              );
+            }
+
+            if (nvmeofControllerDevices.length != nvmeofConnections.length) {
+              driver.ctx.logger.warn(
+                `failed to attach all nvmeof devices/subsystems/transports`
+              );
+
+              // TODO: allow a parameter to control this behavior in some form
+              if (false) {
+                throw new GrpcError(
+                  grpc.status.UNKNOWN,
+                  `unable to attach all nvmeof devices`
+                );
+              }
+            }
+
+            /**
+             * NVMEoF has native multipath capabilities without using device mapper
+             * You can disable the built-in using kernel param nvme_core.multipath=N/Y
+             */
+            let useNativeMultipath = await nvmeof.nativeMultipathEnabled();
+
+            if (useNativeMultipath) {
+              // native multipath merges all paths behind a single namespace device
+              if (nvmeofNamespaceDevices.length > 1) {
+                throw new GrpcError(
+                  grpc.status.UNKNOWN,
+                  `too many nvme namespace devices, native multipath enabled therefore should only have 1`
+                );
+              }
+            } else {
+              // compare all device-mapper slaves with the newly created devices
+              // if any of the new devices are device-mapper slaves treat this as a
+              // multipath scenario
+              let allDeviceMapperSlaves =
+                await filesystem.getAllDeviceMapperSlaveDevices();
+              let commonDevices = allDeviceMapperSlaves.filter((value) =>
+                nvmeofNamespaceDevices.includes(value)
+              );
+
+              const useDMMultipath =
+                nvmeofConnections.length > 1 || commonDevices.length > 0;
+
+              // discover multipath device to use
+              if (useDMMultipath) {
+                device = await filesystem.getDeviceMapperDeviceFromSlaves(
+                  nvmeofNamespaceDevices,
+                  false
+                );
+
+                if (!device) {
+                  throw new GrpcError(
+                    grpc.status.UNKNOWN,
+                    `failed to discover multipath device`
+                  );
+                }
+              }
+            }
+          }
+          break;
 
         case "hostpath":
           result = await mount.pathIsMounted(staging_target_path);
           // if not mounted, mount
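A note on the multipath branch above: nativeMultipathEnabled() simply reads /sys/module/nvme_core/parameters/multipath and compares it to "Y". When native multipath is on, the kernel itself merges all paths behind a single namespace device (hence the "should only have 1" check); only when it is off does the driver fall back to device-mapper discovery.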
@@ -989,6 +1233,7 @@ class CsiBaseDriver {
       let is_block = false;
       switch (node_attach_driver) {
         case "iscsi":
+        case "nvmeof":
           is_block = true;
           break;
         case "zfs-local":
@@ -1093,6 +1338,7 @@ class CsiBaseDriver {
           fs_type = "cifs";
           break;
         case "iscsi":
+        case "nvmeof":
           fs_type = "ext4";
           break;
         default:
@@ -1988,6 +2234,7 @@ class CsiBaseDriver {
     const mount = driver.getDefaultMountInstance();
     const filesystem = driver.getDefaultFilesystemInstance();
     const iscsi = driver.getDefaultISCSIInstance();
+    const nvmeof = driver.getDefaultNVMEoFInstance();
     let result;
     let is_block = false;
     let is_device_mapper = false;
@@ -2211,6 +2458,13 @@ class CsiBaseDriver {
               }
             }
           }
+
+          if (await filesystem.deviceIsNVMEoF(block_device_info_i.path)) {
+            let nqn = await nvmeof.nqnByNamespaceDeviceName(
+              block_device_info_i.name
+            );
+            await nvmeof.disconnectByNQN(nqn);
+          }
         }
       }
 
@@ -129,6 +129,7 @@ class NodeManualDriver extends CsiBaseDriver {
         driverResourceType = "filesystem";
         break;
       case "iscsi":
+      case "nvmeof":
        driverResourceType = "volume";
         fs_types = ["btrfs", "ext3", "ext4", "ext4dev", "xfs"];
         break;
@@ -4,7 +4,7 @@ const { GrpcError, grpc } = require("../../utils/grpc");
 const { Filesystem } = require("../../utils/filesystem");
 const registry = require("../../utils/registry");
 const semver = require("semver");
-const SshClient = require("../../utils/ssh").SshClient;
+const SshClient = require("../../utils/zfs_ssh_exec_client").SshClient;
 const { Zetabyte, ZfsSshProcessManager } = require("../../utils/zfs");
 
 // zfs common properties
@@ -504,7 +504,8 @@ class Filesystem {
    * lsblk
    * blkid
    */
-    const strategy = process.env.FILESYSTEM_TYPE_DETECTION_STRATEGY || "lsblk";
+    const strategy =
+      process.env.FILESYSTEM_TYPE_DETECTION_STRATEGY || "lsblk";
 
     switch (strategy) {
       // requires udev data to be present otherwise fstype property is always null but otherwise succeeds
@@ -547,6 +548,21 @@ class Filesystem {
     return result && result.tran == "iscsi";
   }
 
+  async deviceIsNVMEoF(device) {
+    const filesystem = this;
+    let result;
+
+    do {
+      if (result) {
+        device = `/dev/${result.pkname}`;
+      }
+      result = await filesystem.getBlockDevice(device);
+    } while (result.pkname);
+
+    // TODO: add further logic here to ensure the device is not a local pcie/etc device
+    return result && result.tran == "nvme";
+  }
+
   async getBlockDeviceParent(device) {
     const filesystem = this;
     let result;
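A minimal usage sketch of the new helper (require path and device name hypothetical); it walks up the lsblk pkname chain to the top-level device before checking the reported transport, so a partition or device-mapper member resolves to its backing controller first:

const { Filesystem } = require("./src/utils/filesystem");

async function main() {
  const filesystem = new Filesystem();
  // true when the top-level parent device reports tran == "nvme"
  console.log(await filesystem.deviceIsNVMEoF("/dev/nvme1n1"));
}

main().catch(console.error);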
@@ -8,6 +8,17 @@ function sleep(ms) {
   });
 }
 
+function trimchar(str, ch) {
+  var start = 0,
+    end = str.length;
+
+  while (start < end && str[start] === ch) ++start;
+
+  while (end > start && str[end - 1] === ch) --end;
+
+  return start > 0 || end < str.length ? str.substring(start, end) : str;
+}
+
 function md5(val) {
   return crypto.createHash("md5").update(val).digest("hex");
 }
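trimchar strips one repeated character from each end of a string. The NVMEoF transport parser added below uses it twice in a row to peel the [ and ] that URI parsing leaves around FC-style addresses, which is why it takes a single character rather than a character set. A quick sketch (require path hypothetical):

const { trimchar } = require("./src/utils/general");

trimchar("///tank/ci///", "/"); // => "tank/ci"
trimchar(trimchar("[nn-0x10:pn-0x20]", "["), "]"); // => "nn-0x10:pn-0x20"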
@@ -265,3 +276,4 @@ module.exports.default_supported_block_filesystems =
 module.exports.default_supported_file_filesystems =
   default_supported_file_filesystems;
 module.exports.retry = retry;
+module.exports.trimchar = trimchar;
@@ -0,0 +1,321 @@
+const cp = require("child_process");
+const { trimchar } = require("./general");
+const URI = require("uri-js");
+const { deleteItems } = require("@kubernetes/client-node");
+
+const DEFAULT_TIMEOUT = process.env.NVMEOF_DEFAULT_TIMEOUT || 30000;
+
+class NVMEoF {
+  constructor(options = {}) {
+    const nvmeof = this;
+    nvmeof.options = options;
+
+    options.paths = options.paths || {};
+    if (!options.paths.nvme) {
+      options.paths.nvme = "nvme";
+    }
+
+    if (!options.paths.sudo) {
+      options.paths.sudo = "/usr/bin/sudo";
+    }
+
+    if (!options.executor) {
+      options.executor = {
+        spawn: cp.spawn,
+      };
+    }
+  }
+
+  /**
+   * List all NVMe devices and namespaces on machine
+   *
+   * @param {*} args
+   */
+  async list(args = []) {
+    const nvmeof = this;
+    args.unshift("list", "-o", "json");
+    let result = await nvmeof.exec(nvmeof.options.paths.nvme, args);
+    return result.parsed;
+  }
+
+  /**
+   * List nvme subsystems
+   *
+   * @param {*} args
+   */
+  async listSubsys(args = []) {
+    const nvmeof = this;
+    args.unshift("list-subsys", "-o", "json");
+    await nvmeof.exec(nvmeof.options.paths.nvme, args);
+  }
+
+  /**
+   * Connect to NVMeoF subsystem
+   *
+   * @param {*} args
+   */
+  async connectByNQNTransport(nqn, transport, args = []) {
+    const nvmeof = this;
+    transport = nvmeof.parseTransport(transport);
+
+    let transport_args = [];
+    if (transport.type) {
+      transport_args.push("--transport", transport.type);
+    }
+    if (transport.address) {
+      transport_args.push("--traddr", transport.address);
+    }
+    if (transport.service) {
+      transport_args.push("--trsvcid", transport.service);
+    }
+
+    args.unshift("connect", "-o", "json", "--nqn", nqn, ...transport_args);
+
+    try {
+      await nvmeof.exec(nvmeof.options.paths.nvme, args);
+    } catch (err) {
+      if (err.stderr && err.stderr.includes("already connnected")) {
+        // idempotent
+      } else {
+        throw err;
+      }
+    }
+  }
+
+  /**
+   * Disconnect from NVMeoF subsystem by NQN
+   *
+   * @param {*} args
+   */
+  async disconnectByNQN(nqn, args = []) {
+    const nvmeof = this;
+    args.unshift("disconnect", "--nqn", nqn);
+    await nvmeof.exec(nvmeof.options.paths.nvme, args);
+  }
+
+  /**
+   * Disconnect from NVMeoF subsystem by device
+   *
+   * @param {*} args
+   */
+  async disconnectByDevice(device, args = []) {
+    const nvmeof = this;
+    args.unshift("disconnect", "--device", device);
+    await nvmeof.exec(nvmeof.options.paths.nvme, args);
+  }
+
+  /**
+   * Rescans the NVME namespaces
+   *
+   * @param {*} device
+   * @param {*} args
+   */
+  async rescanNamespace(device, args = []) {
+    const nvmeof = this;
+    args.unshift("ns-rescan", device);
+    await nvmeof.exec(nvmeof.options.paths.nvme, args);
+  }
+
+  parseTransport(transport) {
+    if (typeof transport === "object") {
+      return transport;
+    }
+
+    transport = transport.trim();
+    const parsed = URI.parse(transport);
+
+    let type = parsed.scheme;
+    let address = parsed.host;
+    let service;
+    switch (parsed.scheme) {
+      case "fc":
+      case "rdma":
+      case "tcp":
+        type = parsed.scheme;
+        break;
+      default:
+        throw new Error(`unknown nvme transport type: ${parsed.scheme}`);
+    }
+
+    switch (type) {
+      case "fc":
+        address = trimchar(address, "[");
+        address = trimchar(address, "]");
+        break;
+    }
+
+    switch (type) {
+      case "rdma":
+      case "tcp":
+        service = parsed.port;
+
+        if (!service) {
+          service = 4420;
+        }
+        break;
+    }
+
+    return {
+      type,
+      address,
+      service,
+    };
+  }
+
+  async nativeMultipathEnabled() {
+    const nvmeof = this;
+    let result = await nvmeof.exec("cat", [
+      "/sys/module/nvme_core/parameters/multipath",
+    ]);
+    return result.stdout.trim() == "Y";
+  }
+
+  async namespaceDevicePathByNQNNamespace(nqn, namespace) {
+    const nvmeof = this;
+    let result = await nvmeof.list(["-v"]);
+    for (let device of result.Devices) {
+      for (let subsytem of device.Subsystems) {
+        if (subsytem.SubsystemNQN != nqn) {
+          continue;
+        } else {
+          for (let i_namespace of subsytem.Namespaces) {
+            if (i_namespace.NSID != namespace) {
+              continue;
+            } else {
+              return `/dev/${i_namespace.NameSpace}`;
+            }
+          }
+        }
+      }
+    }
+  }
+
+  async controllerDevicePathByTransportNQN(transport, nqn) {
+    const nvmeof = this;
+    transport = nvmeof.parseTransport(transport);
+    let result = await nvmeof.list(["-v"]);
+    for (let device of result.Devices) {
+      for (let subsytem of device.Subsystems) {
+        if (subsytem.SubsystemNQN != nqn) {
+          continue;
+        } else {
+          for (let controller of subsytem.Controllers) {
+            if (controller.Transport != transport.type) {
+              continue;
+            }
+
+            let controllerAddress = controller.Address;
+            let parts = controllerAddress.split(",");
+
+            let traddr;
+            let trsvcid;
+            for (let i_part of parts) {
+              let i_parts = i_part.split("=");
+              switch (i_parts[0]) {
+                case "traddr":
+                  traddr = i_parts[1];
+                  break;
+                case "trsvcid":
+                  trsvcid = i_parts[1];
+                  break;
+              }
+            }
+
+            if (traddr != transport.address) {
+              continue;
+            }
+
+            if (transport.service && trsvcid != transport.service) {
+              continue;
+            }
+
+            return `/dev/${controller.Controller}`;
+          }
+        }
+      }
+    }
+  }
+
+  async nqnByNamespaceDeviceName(name) {
+    const nvmeof = this;
+    name = name.replace("/dev/", "");
+    let result = await nvmeof.list(["-v"]);
+    for (let device of result.Devices) {
+      for (let subsytem of device.Subsystems) {
+        for (let namespace of subsytem.Namespaces) {
+          if (namespace.NameSpace != name) {
+            continue;
+          } else {
+            return subsytem.SubsystemNQN;
+          }
+        }
+      }
+    }
+  }
+
+  devicePathByModelNumberSerialNumber(modelNumber, serialNumber) {
+    modelNumber = modelNumber.replaceAll(" ", "_");
+    serialNumber = serialNumber.replaceAll(" ", "_");
+    return `/dev/disk/by-id/nvme-${modelNumber}_${serialNumber}`;
+  }
+
+  devicePathByPortalIQNLUN(portal, iqn, lun) {
+    const parsedPortal = this.parsePortal(portal);
+    const portalHost = parsedPortal.host
+      .replaceAll("[", "")
+      .replaceAll("]", "");
+    return `/dev/disk/by-path/ip-${portalHost}:${parsedPortal.port}-iscsi-${iqn}-lun-${lun}`;
+  }
+
+  exec(command, args, options = {}) {
+    if (!options.hasOwnProperty("timeout")) {
+      options.timeout = DEFAULT_TIMEOUT;
+    }
+
+    const nvmeof = this;
+    args = args || [];
+
+    if (nvmeof.options.sudo) {
+      args.unshift(command);
+      command = nvmeof.options.paths.sudo;
+    }
+
+    console.log("executing nvmeof command: %s %s", command, args.join(" "));
+
+    return new Promise((resolve, reject) => {
+      const child = nvmeof.options.executor.spawn(command, args, options);
+
+      let stdout = "";
+      let stderr = "";
+
+      child.stdout.on("data", function (data) {
+        stdout = stdout + data;
+      });
+
+      child.stderr.on("data", function (data) {
+        stderr = stderr + data;
+      });
+
+      child.on("close", function (code) {
+        const result = { code, stdout, stderr, timeout: false };
+        try {
+          result.parsed = JSON.parse(result.stdout);
+        } catch (err) {}
+
+        // timeout scenario
+        if (code === null) {
+          result.timeout = true;
+          reject(result);
+        }
+
+        if (code) {
+          reject(result);
+        } else {
+          resolve(result);
+        }
+      });
+    });
+  }
+}
+
+module.exports.NVMEoF = NVMEoF;
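A minimal usage sketch of the new utility class (host, nqn, and namespace values hypothetical):

const { NVMEoF } = require("./src/utils/nvmeof");

async function main() {
  const nvmeof = new NVMEoF({ sudo: true });

  // "tcp://server.example:4420" => { type: "tcp", address: "server.example", service: 4420 }
  console.log(nvmeof.parseTransport("tcp://server.example:4420"));

  const nqn =
    "nqn.2003-01.org.linux-nvmeof.ubuntu-19.x8664:csi-ci-example-pvc-1";
  await nvmeof.connectByNQNTransport(nqn, "tcp://server.example:4420");
  const device = await nvmeof.namespaceDevicePathByNQNNamespace(nqn, 1);
  console.log(device); // e.g. /dev/nvme1n1
  await nvmeof.disconnectByNQN(nqn);
}

main().catch(console.error);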