zfs-local-{dataset,zvol}

Signed-off-by: Travis Glenn Hansen <travisghansen@yahoo.com>

parent 46e504dd85
commit 5426f1ec12
@@ -114,12 +114,33 @@ jobs:
          SERVER_USERNAME: ${{ secrets.SANITY_ZFS_GENERIC_USERNAME }}
          SERVER_PASSWORD: ${{ secrets.SANITY_ZFS_GENERIC_PASSWORD }}
 
+  # zfs-local drivers
+  csi-sanity-zfs-local:
+    strategy:
+      fail-fast: false
+      matrix:
+        config:
+          - zfs-local/zvol.yaml
+          - zfs-local/dataset.yaml
+    runs-on:
+      - self-hosted
+      - csi-sanity-zfs-local
+    steps:
+      - uses: actions/checkout@v2
+      - name: csi-sanity
+        run: |
+          # run tests
+          ci/bin/run.sh
+        env:
+          TEMPLATE_CONFIG_FILE: "./ci/configs/${{ matrix.config }}"
+
  build:
    needs:
      - csi-sanity-synology
      - csi-sanity-truenas-scale
      - csi-sanity-truenas-core
      - csi-sanity-zfs-generic
+      - csi-sanity-zfs-local
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
README.md

@@ -24,6 +24,8 @@ have access to resizing, snapshots, clones, etc functionality.
 - `zfs-generic-nfs` (works with any ZoL installation...ie: Ubuntu)
 - `zfs-generic-iscsi` (works with any ZoL installation...ie: Ubuntu)
 - `zfs-local-ephemeral-inline` (provisions node-local zfs datasets)
+- `zfs-local-dataset` (provision node-local volume as dataset)
+- `zfs-local-zvol` (provision node-local volume as zvol)
 - `synology-iscsi` experimental (manages volumes to share over iscsi)
 - `lustre-client` (crudely provisions storage using a shared lustre
   share/directory for all volumes)

@@ -141,6 +143,13 @@ necessary.
 - https://github.com/kubernetes/enhancements/blob/master/keps/sig-storage/20190122-csi-inline-volumes.md
 - https://kubernetes-csi.github.io/docs/ephemeral-local-volumes.html
 
+### zfs-local-{dataset,zvol}
+
+This `driver` provisions node-local storage. Each node should have an
+identically named zfs pool created and available to the `driver`. Note, this is
+_NOT_ the same thing as using the docker zfs storage driver (although the same
+pool could be used). No other requirements are necessary.
+
 ## Server Prep
 
 Server preparation depends slightly on which `driver` you are using.
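Because these volumes live on a single node's pool, the commit pins each volume to the node that created it via a dedicated topology key (defined in the driver source further below). A minimal sketch of the topology shape involved — the key name comes from the code below, while the node id is illustrative:

```js
// Sketch: the accessible-topology shape used to pin a zfs-local volume to
// the node that holds its pool. The key name matches the driver constant
// defined below; "worker-1" is a made-up node id.
const NODE_TOPOLOGY_KEY_NAME = "org.democratic-csi.topology/node";

function volumeTopologyFor(nodeId) {
  // same shape as getAccessibleTopology() returns in controller-zfs-local
  return [{ segments: { [NODE_TOPOLOGY_KEY_NAME]: nodeId } }];
}

console.log(JSON.stringify(volumeTopologyFor("worker-1")));
```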
@@ -371,3 +380,4 @@ A special shout out to the wonderful sponsors of the project!
 - https://datamattsson.tumblr.com/post/624751011659202560/welcome-truenas-core-container-storage-provider
 - https://github.com/dravanet/truenas-csi
+- https://github.com/SynologyOpenSource/synology-csi
 
ci/configs/zfs-local/dataset.yaml

@@ -0,0 +1,10 @@
+driver: zfs-local-dataset
+
+zfs:
+  datasetParentName: tank/ci/${CI_BUILD_KEY}/v
+  detachedSnapshotsDatasetParentName: tank/ci/${CI_BUILD_KEY}/s
+
+  datasetEnableQuotas: true
+  datasetPermissionsMode: "0777"
+  datasetPermissionsUser: 0
+  datasetPermissionsGroup: 0
ci/configs/zfs-local/zvol.yaml

@@ -0,0 +1,10 @@
+driver: zfs-local-zvol
+
+zfs:
+  datasetParentName: tank/ci/${CI_BUILD_KEY}/v
+  detachedSnapshotsDatasetParentName: tank/ci/${CI_BUILD_KEY}/s
+
+  zvolCompression:
+  zvolDedup:
+  zvolEnableReservation: false
+  zvolBlocksize:
@@ -0,0 +1,3 @@
+#!/bin/bash
+
+chroot /host /usr/bin/env -i PATH="/sbin:/bin:/usr/bin:/usr/sbin" zfs "${@:1}"
@@ -0,0 +1,3 @@
+#!/bin/bash
+
+chroot /host /usr/bin/env -i PATH="/sbin:/bin:/usr/bin:/usr/sbin" zpool "${@:1}"
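The two wrapper scripts above let the containerized driver operate the host's ZFS: with the host root mounted at `/host` and the wrappers reachable as `zfs`/`zpool` on the container's `PATH`, every invocation is chroot'ed to the host with a sanitized environment. A sketch of the calling side under those assumptions — the dataset name is illustrative:

```js
// Sketch: because the driver below sets options.paths = { zfs: "zfs", ... }
// (bare names rather than absolute paths), shell PATH lookup can resolve to
// the wrapper scripts above, which chroot to /host before running the real
// binaries. Assumes the wrappers are installed on PATH inside the container.
const cp = require("child_process");

function hostZfs(args) {
  return cp.execSync(["zfs", ...args].join(" "), { encoding: "utf8" });
}

// e.g. hostZfs(["get", "-H", "-o", "value", "type", "tank/k8s/local/v/pvc-x"]);
```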
examples/zfs-local-dataset.yaml

@@ -0,0 +1,14 @@
+driver: zfs-local-dataset
+
+zfs:
+  datasetParentName: tank/k8s/local/v
+  detachedSnapshotsDatasetParentName: tank/k8s/local/s
+
+  datasetProperties:
+    # key: value
+
+  datasetEnableQuotas: true
+  datasetEnableReservation: false
+  datasetPermissionsMode: "0777"
+  datasetPermissionsUser: 0
+  datasetPermissionsGroup: 0
examples/zfs-local-zvol.yaml

@@ -0,0 +1,13 @@
+driver: zfs-local-zvol
+
+zfs:
+  datasetParentName: tank/k8s/local/v
+  detachedSnapshotsDatasetParentName: tank/k8s/local/s
+
+  datasetProperties:
+    # key: value
+
+  zvolCompression:
+  zvolDedup:
+  zvolEnableReservation: false
+  zvolBlocksize:
@@ -0,0 +1,56 @@
+const cp = require("child_process");
+
+class LocalCliExecClient {
+  constructor(options = {}) {
+    this.options = options;
+    if (this.options.logger) {
+      this.logger = this.options.logger;
+    } else {
+      this.logger = console;
+    }
+  }
+
+  /**
+   * Build a command line from the name and given args
+   * TODO: escape the arguments
+   *
+   * @param {*} name
+   * @param {*} args
+   */
+  buildCommand(name, args = []) {
+    args.unshift(name);
+    return args.join(" ");
+  }
+
+  debug() {
+    this.logger.silly(...arguments);
+  }
+
+  async exec(command, options = {}) {
+    return new Promise((resolve, reject) => {
+      this.logger.verbose("LocalCliExecClient command: " + command);
+      let process = cp.exec(command, (err, stdout, stderr) => {
+        if (err) {
+          reject(err);
+        }
+        resolve({
+          stderr,
+          stdout,
+          code: process.exitCode,
+          signal: process.exitSignal,
+        });
+      });
+    });
+  }
+
+  /**
+   * simple wrapper for logging
+   */
+  spawn() {
+    const command = this.buildCommand(arguments[0], arguments[1]);
+    this.logger.verbose("LocalCliExecClient command: " + command);
+    return cp.exec(command);
+  }
+}
+
+module.exports.LocalCliClient = LocalCliExecClient;
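A usage sketch for the client above. Note that `exec()` logs through `logger.verbose()` and `debug()` through `logger.silly()`, neither of which exists on the plain `console` fallback, so a caller relying on the default logger would want to shim them; the command here is illustrative:

```js
// Usage sketch, assuming the class above is saved as ./exec.js.
const { LocalCliClient } = require("./exec");

// console has no verbose()/silly(); shim them so the logging calls don't throw
console.verbose = console.silly = console.debug.bind(console);

async function main() {
  const client = new LocalCliClient({ logger: console });
  const command = client.buildCommand("zfs", ["list", "-H", "-o", "name"]);
  const result = await client.exec(command);
  console.log(result.stdout);
}

main().catch(console.error);
```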
@@ -0,0 +1,194 @@
+const { ControllerZfsBaseDriver } = require("../controller-zfs");
+const { GrpcError, grpc } = require("../../utils/grpc");
+const LocalCliExecClient = require("./exec").LocalCliClient;
+const os = require("os");
+const { Zetabyte } = require("../../utils/zfs");
+
+const ZFS_ASSET_NAME_PROPERTY_NAME = "zfs_asset_name";
+const NODE_TOPOLOGY_KEY_NAME = "org.democratic-csi.topology/node";
+
+class ControllerZfsLocalDriver extends ControllerZfsBaseDriver {
+  getExecClient() {
+    return new LocalCliExecClient({
+      logger: this.ctx.logger,
+    });
+  }
+
+  async getZetabyte() {
+    const execClient = this.getExecClient();
+
+    const options = {};
+    options.executor = execClient;
+    options.idempotent = true;
+
+    /*
+    if (
+      this.options.zfs.hasOwnProperty("cli") &&
+      this.options.zfs.cli &&
+      this.options.zfs.cli.hasOwnProperty("paths")
+    ) {
+      options.paths = this.options.zfs.cli.paths;
+    }
+    */
+
+    // use env based paths to allow for custom wrapper scripts to chroot to the host
+    options.paths = {
+      zfs: "zfs",
+      zpool: "zpool",
+      sudo: "sudo",
+      chroot: "chroot",
+    };
+
+    if (
+      this.options.zfs.hasOwnProperty("cli") &&
+      this.options.zfs.cli &&
+      this.options.zfs.cli.hasOwnProperty("sudoEnabled")
+    ) {
+      options.sudo = this.getSudoEnabled();
+    }
+
+    if (typeof this.setZetabyteCustomOptions === "function") {
+      await this.setZetabyteCustomOptions(options);
+    }
+
+    return new Zetabyte(options);
+  }
+
+  /**
+   * cannot make this a storage class parameter as storage class/etc context is *not* sent
+   * into various calls such as GetControllerCapabilities etc
+   */
+  getDriverZfsResourceType() {
+    switch (this.options.driver) {
+      case "zfs-local-dataset":
+        return "filesystem";
+      case "zfs-local-zvol":
+        return "volume";
+      default:
+        throw new Error("unknown driver: " + this.ctx.args.driver);
+    }
+  }
+
+  getFSTypes() {
+    const driverZfsResourceType = this.getDriverZfsResourceType();
+    switch (driverZfsResourceType) {
+      case "filesystem":
+        return ["zfs"];
+      case "volume":
+        return ["ext3", "ext4", "ext4dev", "xfs"];
+    }
+  }
+
+  getAccessModes() {
+    const driverZfsResourceType = this.getDriverZfsResourceType();
+    switch (driverZfsResourceType) {
+      case "filesystem":
+        return [
+          "UNKNOWN",
+          "SINGLE_NODE_WRITER",
+          "SINGLE_NODE_SINGLE_WRITER", // added in v1.5.0
+          "SINGLE_NODE_MULTI_WRITER", // added in v1.5.0
+          "SINGLE_NODE_READER_ONLY",
+          //"MULTI_NODE_READER_ONLY",
+          //"MULTI_NODE_SINGLE_WRITER",
+          //"MULTI_NODE_MULTI_WRITER",
+        ];
+      case "volume":
+        return [
+          "UNKNOWN",
+          "SINGLE_NODE_WRITER",
+          "SINGLE_NODE_SINGLE_WRITER", // added in v1.5.0
+          "SINGLE_NODE_MULTI_WRITER", // added in v1.5.0
+          "SINGLE_NODE_READER_ONLY",
+          //"MULTI_NODE_READER_ONLY",
+          //"MULTI_NODE_SINGLE_WRITER",
+        ];
+    }
+  }
+
+  /**
+   * csi controller service
+   *
+   * should create any necessary share resources and return volume context
+   *
+   * @param {*} datasetName
+   */
+  async createShare(call, datasetName) {
+    let volume_context = {};
+
+    switch (this.options.driver) {
+      case "zfs-local-dataset":
+        volume_context = {
+          node_attach_driver: "zfs-local",
+          [ZFS_ASSET_NAME_PROPERTY_NAME]: datasetName,
+        };
+        return volume_context;
+
+      case "zfs-local-zvol":
+        volume_context = {
+          node_attach_driver: "zfs-local",
+          [ZFS_ASSET_NAME_PROPERTY_NAME]: datasetName,
+        };
+        return volume_context;
+
+      default:
+        throw new GrpcError(
+          grpc.status.FAILED_PRECONDITION,
+          `invalid configuration: unknown driver ${this.options.driver}`
+        );
+    }
+  }
+
+  /**
+   * csi controller service
+   *
+   * @param {*} call
+   * @param {*} datasetName
+   * @returns
+   */
+  async deleteShare(call, datasetName) {
+    return {};
+  }
+
+  /**
+   * csi controller service
+   *
+   * @param {*} call
+   * @param {*} datasetName
+   */
+  async expandVolume(call, datasetName) {}
+
+  /**
+   * List of topologies associated with the *volume*
+   *
+   * @returns array
+   */
+  async getAccessibleTopology() {
+    const response = await super.NodeGetInfo(...arguments);
+    return [
+      {
+        segments: {
+          [NODE_TOPOLOGY_KEY_NAME]: response.node_id,
+        },
+      },
+    ];
+  }
+
+  /**
+   * Add node topologies
+   *
+   * @param {*} call
+   * @returns
+   */
+  async NodeGetInfo(call) {
+    const response = await super.NodeGetInfo(...arguments);
+    response.accessible_topology = {
+      segments: {
+        [NODE_TOPOLOGY_KEY_NAME]: response.node_id,
+      },
+    };
+    return response;
+  }
+}
+
+module.exports.ControllerZfsLocalDriver = ControllerZfsLocalDriver;
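The same driver class serves both config names; only `getDriverZfsResourceType()` distinguishes them, and fs types, access modes, and staging behavior all branch on its result. A standalone mirror of that mapping:

```js
// Standalone mirror of getDriverZfsResourceType() above.
function resourceTypeFor(driverName) {
  switch (driverName) {
    case "zfs-local-dataset":
      return "filesystem"; // dataset mounted directly with -t zfs
    case "zfs-local-zvol":
      return "volume"; // block device under /dev/zvol, formatted ext3/ext4/xfs
    default:
      throw new Error("unknown driver: " + driverName);
  }
}

console.log(resourceTypeFor("zfs-local-zvol")); // -> "volume"
```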
@@ -1,6 +1,7 @@
 const { FreeNASSshDriver } = require("./freenas/ssh");
 const { FreeNASApiDriver } = require("./freenas/api");
 const { ControllerZfsGenericDriver } = require("./controller-zfs-generic");
+const { ControllerZfsLocalDriver } = require("./controller-zfs-local");
 const {
   ZfsLocalEphemeralInlineDriver,
 } = require("./zfs-local-ephemeral-inline");

@@ -31,6 +32,9 @@ function factory(ctx, options) {
     case "zfs-generic-nfs":
     case "zfs-generic-iscsi":
       return new ControllerZfsGenericDriver(ctx, options);
+    case "zfs-local-dataset":
+    case "zfs-local-zvol":
+      return new ControllerZfsLocalDriver(ctx, options);
     case "zfs-local-ephemeral-inline":
       return new ZfsLocalEphemeralInlineDriver(ctx, options);
     case "smb-client":
@@ -1,4 +1,5 @@
 const _ = require("lodash");
+const cp = require("child_process");
 const os = require("os");
 const fs = require("fs");
 const { GrpcError, grpc } = require("../utils/grpc");

@@ -7,6 +8,7 @@ const { Filesystem } = require("../utils/filesystem");
 const { ISCSI } = require("../utils/iscsi");
 const semver = require("semver");
 const sleep = require("../utils/general").sleep;
+const { Zetabyte } = require("../utils/zfs");
 
 /**
  * common code shared between all drivers
@@ -565,6 +567,54 @@ class CsiBaseDriver {
             }
           }
           break;
+        case "zfs-local":
+          // TODO: make this a generic zb instance (to ensure works with node-manual driver)
+          const zb = new Zetabyte({
+            idempotent: true,
+            paths: {
+              zfs: "zfs",
+              zpool: "zpool",
+              sudo: "sudo",
+              chroot: "chroot",
+            },
+            //logger: driver.ctx.logger,
+            executor: {
+              spawn: function () {
+                const command = `${arguments[0]} ${arguments[1].join(" ")}`;
+                return cp.exec(command);
+              },
+            },
+            log_commands: true,
+          });
+          result = await zb.zfs.get(`${volume_context.zfs_asset_name}`, [
+            "type",
+            "mountpoint",
+          ]);
+          result = result[`${volume_context.zfs_asset_name}`];
+          switch (result.type.value) {
+            case "filesystem":
+              if (result.mountpoint.value != "legacy") {
+                // zfs set mountpoint=legacy <dataset>
+                // zfs inherit mountpoint <dataset>
+                await zb.zfs.set(`${volume_context.zfs_asset_name}`, {
+                  mountpoint: "legacy",
+                });
+              }
+              device = `${volume_context.zfs_asset_name}`;
+              if (!fs_type) {
+                fs_type = "zfs";
+              }
+              break;
+            case "volume":
+              device = `/dev/zvol/${volume_context.zfs_asset_name}`;
+              break;
+            default:
+              throw new GrpcError(
+                grpc.status.UNKNOWN,
+                `unknown zfs asset type: ${result.type.value}`
+              );
+          }
+          break;
         default:
           throw new GrpcError(
             grpc.status.INVALID_ARGUMENT,
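On the node side, the new `zfs-local` case decides how to stage purely from the asset's `zfs get type,mountpoint` output: filesystems are forced to `mountpoint=legacy` and mounted with `-t zfs`, while zvols surface as block devices and flow through the existing iscsi-style format/fsck path (reworked in the next hunk). A condensed mirror of that decision — the asset name is illustrative:

```js
// Condensed mirror of the staging logic above: pick the mount source and
// default fs_type from the zfs asset type.
function stagingPlan(assetName, assetType) {
  switch (assetType) {
    case "filesystem":
      // dataset is set to mountpoint=legacy, then `mount -t zfs <dataset> <path>`
      return { device: assetName, fs_type: "zfs" };
    case "volume":
      // zvol shows up as a block device; defaults to ext4 like the iscsi path
      return { device: `/dev/zvol/${assetName}`, fs_type: "ext4" };
    default:
      throw new Error(`unknown zfs asset type: ${assetType}`);
  }
}

console.log(stagingPlan("tank/k8s/local/v/pvc-123", "volume"));
```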
@@ -574,53 +624,59 @@ class CsiBaseDriver {
 
       switch (access_type) {
         case "mount":
+          let is_block = false;
           switch (node_attach_driver) {
             // block specific logic
             case "iscsi":
-              if (!fs_type) {
-                fs_type = "ext4";
-              }
+              is_block = true;
+              break;
+            case "zfs-local":
+              is_block = device.startsWith("/dev/zvol/");
+              break;
+          }
 
-              if (await filesystem.isBlockDevice(device)) {
-                // format
-                result = await filesystem.deviceIsFormatted(device);
-                if (!result) {
-                  let formatOptions = _.get(
-                    driver.options.node.format,
-                    [fs_type, "customOptions"],
-                    []
-                  );
-                  if (!Array.isArray(formatOptions)) {
-                    formatOptions = [];
-                  }
-                  await filesystem.formatDevice(device, fs_type, formatOptions);
-                }
-
-                let fs_info = await filesystem.getDeviceFilesystemInfo(device);
-                fs_type = fs_info.type;
-
-                // fsck
-                result = await mount.deviceIsMountedAtPath(
-                  device,
-                  staging_target_path
-                );
-                if (!result) {
-                  // https://github.com/democratic-csi/democratic-csi/issues/52#issuecomment-768463401
-                  let checkFilesystem =
-                    driver.options.node.mount.checkFilesystem[fs_type] || {};
-                  if (checkFilesystem.enabled) {
-                    await filesystem.checkFilesystem(
-                      device,
-                      fs_type,
-                      checkFilesystem.customOptions || [],
-                      checkFilesystem.customFilesystemOptions || []
-                    );
-                  }
-                }
-              }
-              break;
-            default:
-              break;
-          }
+          if (is_block) {
+            // block specific logic
+            if (!fs_type) {
+              fs_type = "ext4";
+            }
+            if (await filesystem.isBlockDevice(device)) {
+              // format
+              result = await filesystem.deviceIsFormatted(device);
+              if (!result) {
+                let formatOptions = _.get(
+                  driver.options.node.format,
+                  [fs_type, "customOptions"],
+                  []
+                );
+                if (!Array.isArray(formatOptions)) {
+                  formatOptions = [];
+                }
+                await filesystem.formatDevice(device, fs_type, formatOptions);
+              }
+
+              let fs_info = await filesystem.getDeviceFilesystemInfo(device);
+              fs_type = fs_info.type;
+
+              // fsck
+              result = await mount.deviceIsMountedAtPath(
+                device,
+                staging_target_path
+              );
+              if (!result) {
+                // https://github.com/democratic-csi/democratic-csi/issues/52#issuecomment-768463401
+                let checkFilesystem =
+                  driver.options.node.mount.checkFilesystem[fs_type] || {};
+                if (checkFilesystem.enabled) {
+                  await filesystem.checkFilesystem(
+                    device,
+                    fs_type,
+                    checkFilesystem.customOptions || [],
+                    checkFilesystem.customFilesystemOptions || []
+                  );
+                }
+              }
+            }
+          }
 
           result = await mount.deviceIsMountedAtPath(device, staging_target_path);
@@ -1012,6 +1068,7 @@ class CsiBaseDriver {
       case "smb":
       case "lustre":
       case "iscsi":
+      case "zfs-local":
         // ensure appropriate directories/files
         switch (access_type) {
           case "mount":
@@ -1348,12 +1405,17 @@ class CsiBaseDriver {
         rescan_devices.push(device);
 
         for (let sdevice of rescan_devices) {
+          // TODO: technically rescan is only relevant/available for remote drives
+          // such as iscsi etc, should probably limit this call as appropriate
+          // for now crudely checking the scenario inside the method itself
           await filesystem.rescanDevice(sdevice);
         }
 
         // let things settle
         // it appears the dm devices can take a second to figure things out
-        await sleep(2000);
+        if (is_device_mapper || true) {
+          await sleep(2000);
+        }
 
         if (is_formatted && access_type == "mount") {
           fs_info = await filesystem.getDeviceFilesystemInfo(device);
@@ -431,8 +431,12 @@ class Filesystem {
 
       // echo 1 > /sys/block/sdb/device/rescan
       const sys_file = `/sys/block/${device_name}/device/rescan`;
-      console.log(`executing filesystem command: echo 1 > ${sys_file}`);
-      fs.writeFileSync(sys_file, "1");
+
+      // node-local devices cannot be rescanned, so ignore
+      if (await filesystem.pathExists(sys_file)) {
+        console.log(`executing filesystem command: echo 1 > ${sys_file}`);
+        fs.writeFileSync(sys_file, "1");
+      }
     }
   }
 
@@ -10,7 +10,7 @@ FINDMNT_COMMON_OPTIONS = [
   "--nofsroot", // prevents unwanted behavior with cifs volumes
 ];
 
-DEFAUT_TIMEOUT = process.env.MOUNT_DEFAULT_TIMEOUT || 30000;
+DEFAULT_TIMEOUT = process.env.MOUNT_DEFAULT_TIMEOUT || 30000;
 
 class Mount {
   constructor(options = {}) {

@@ -34,6 +34,10 @@ class Mount {
       options.paths.sudo = "/usr/bin/sudo";
     }
 
+    if (!options.paths.chroot) {
+      options.paths.chroot = "/usr/sbin/chroot";
+    }
+
     if (!options.timeout) {
       options.timeout = 10 * 60 * 1000;
     }

@@ -379,7 +383,7 @@ class Mount {
 
   exec(command, args, options = {}) {
     if (!options.hasOwnProperty("timeout")) {
-      options.timeout = DEFAUT_TIMEOUT;
+      options.timeout = DEFAULT_TIMEOUT;
     }
 
     const mount = this;
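One subtlety with the corrected constant above: the `MOUNT_DEFAULT_TIMEOUT` override is read once at module load, and environment values always arrive as strings, whereas the `30000` fallback is a number. A quick illustration (the value is made up):

```js
// Sketch: the env override must be set before the module is loaded, and it
// comes through as a string; the fallback 30000 stays a number.
process.env.MOUNT_DEFAULT_TIMEOUT = "60000";
const DEFAULT_TIMEOUT = process.env.MOUNT_DEFAULT_TIMEOUT || 30000;
console.log(DEFAULT_TIMEOUT, typeof DEFAULT_TIMEOUT); // 60000 string
```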
@@ -38,6 +38,14 @@ class Zetabyte {
       };
     }
 
+    if (!options.logger) {
+      options.logger = console;
+    }
+
+    if (!options.hasOwnProperty("log_commands")) {
+      options.log_commands = false;
+    }
+
     zb.DEFAULT_ZPOOL_LIST_PROPERTIES = [
       "name",
       "size",

@@ -1548,6 +1556,15 @@ class Zetabyte {
         command = zb.options.paths.sudo;
       }
 
+      if (zb.options.log_commands) {
+        if (typeof zb.options.logger.verbose != "function") {
+          zb.options.logger.verbose = function () {
+            console.debug(...arguments);
+          }
+        }
+        zb.options.logger.verbose(`executing zfs command: ${command} ${args.join(" ")}`);
+      }
+
       const child = zb.options.executor.spawn(command, args, options);
 
       let didTimeout = false;