freenas overhaul, synology shell

Signed-off-by: Travis Glenn Hansen <travisghansen@yahoo.com>
Travis Glenn Hansen 2021-06-03 17:20:01 -06:00
parent 8765da65c4
commit 8b6e12dd77
6 changed files with 3980 additions and 3 deletions


@@ -1,7 +1,6 @@
const { CsiBaseDriver } = require("../index");
const { GrpcError, grpc } = require("../../utils/grpc");
const cp = require("child_process");
const { Mount } = require("../../utils/mount");
/**
* Crude nfs-client driver which simply creates directories to be mounted


@@ -0,0 +1,465 @@
const { CsiBaseDriver } = require("../index");
const { GrpcError, grpc } = require("../../utils/grpc");
/**
*
* Driver to provision storage on a Synology device
*
*/
class ControllerSynologyDriver extends CsiBaseDriver {
constructor(ctx, options) {
super(...arguments);
options = options || {};
options.service = options.service || {};
options.service.identity = options.service.identity || {};
options.service.controller = options.service.controller || {};
options.service.node = options.service.node || {};
options.service.identity.capabilities =
options.service.identity.capabilities || {};
options.service.controller.capabilities =
options.service.controller.capabilities || {};
options.service.node.capabilities = options.service.node.capabilities || {};
if (!("service" in options.service.identity.capabilities)) {
this.ctx.logger.debug("setting default identity service caps");
options.service.identity.capabilities.service = [
//"UNKNOWN",
"CONTROLLER_SERVICE",
//"VOLUME_ACCESSIBILITY_CONSTRAINTS"
];
}
if (!("volume_expansion" in options.service.identity.capabilities)) {
this.ctx.logger.debug("setting default identity volume_expansion caps");
options.service.identity.capabilities.volume_expansion = [
//"UNKNOWN",
"ONLINE",
//"OFFLINE"
];
}
if (!("rpc" in options.service.controller.capabilities)) {
this.ctx.logger.debug("setting default controller caps");
options.service.controller.capabilities.rpc = [
//"UNKNOWN",
"CREATE_DELETE_VOLUME",
//"PUBLISH_UNPUBLISH_VOLUME",
//"LIST_VOLUMES",
//"GET_CAPACITY",
//"CREATE_DELETE_SNAPSHOT",
//"LIST_SNAPSHOTS",
//"CLONE_VOLUME",
//"PUBLISH_READONLY",
//"EXPAND_VOLUME",
];
}
if (!("rpc" in options.service.node.capabilities)) {
this.ctx.logger.debug("setting default node caps");
options.service.node.capabilities.rpc = [
//"UNKNOWN",
"STAGE_UNSTAGE_VOLUME",
"GET_VOLUME_STATS",
//"EXPAND_VOLUME"
];
}
}
getDriverResourceType() {
switch (this.options.driver) {
case "synology-nfs":
case "synology-smb":
return "filesystem";
case "synology-iscsi":
return "volume";
default:
throw new Error("unknown driver: " + this.ctx.args.driver);
}
}
getDriverShareType() {
switch (this.options.driver) {
case "synology-nfs":
return "nfs";
case "synology-smb":
return "smb";
case "synology-iscsi":
return "iscsi";
default:
throw new Error("unknown driver: " + this.ctx.args.driver);
}
}
assertCapabilities(capabilities) {
const driverResourceType = this.getDriverResourceType();
this.ctx.logger.verbose("validating capabilities: %j", capabilities);
let message = null;
//[{"access_mode":{"mode":"SINGLE_NODE_WRITER"},"mount":{"mount_flags":["noatime","_netdev"],"fs_type":"nfs"},"access_type":"mount"}]
const valid = capabilities.every((capability) => {
switch (driverResourceType) {
case "filesystem":
if (capability.access_type != "mount") {
message = `invalid access_type ${capability.access_type}`;
return false;
}
if (
capability.mount.fs_type &&
!["nfs", "cifs"].includes(capability.mount.fs_type)
) {
message = `invalid fs_type ${capability.mount.fs_type}`;
return false;
}
if (
![
"UNKNOWN",
"SINGLE_NODE_WRITER",
"SINGLE_NODE_READER_ONLY",
"MULTI_NODE_READER_ONLY",
"MULTI_NODE_SINGLE_WRITER",
"MULTI_NODE_MULTI_WRITER",
].includes(capability.access_mode.mode)
) {
message = `invalid access_mode, ${capability.access_mode.mode}`;
return false;
}
return true;
case "volume":
if (capability.access_type == "mount") {
if (
capability.mount.fs_type &&
!["ext3", "ext4", "ext4dev", "xfs"].includes(
capability.mount.fs_type
)
) {
message = `invalid fs_type ${capability.mount.fs_type}`;
return false;
}
}
if (
![
"UNKNOWN",
"SINGLE_NODE_WRITER",
"SINGLE_NODE_READER_ONLY",
"MULTI_NODE_READER_ONLY",
"MULTI_NODE_SINGLE_WRITER",
].includes(capability.access_mode.mode)
) {
message = `invalid access_mode, ${capability.access_mode.mode}`;
return false;
}
return true;
}
});
return { valid, message };
}
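// usage sketch: the example capability shown above validates for the
// filesystem resource type:
// const { valid } = driver.assertCapabilities([
//   {
//     access_mode: { mode: "SINGLE_NODE_WRITER" },
//     mount: { mount_flags: ["noatime", "_netdev"], fs_type: "nfs" },
//     access_type: "mount",
//   },
// ]); // valid === true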
/**
*
* CreateVolume
*
* @param {*} call
*/
async CreateVolume(call) {
const driver = this;
let name = call.request.name;
let volume_content_source = call.request.volume_content_source;
if (!name) {
throw new GrpcError(
grpc.status.INVALID_ARGUMENT,
`volume name is required`
);
}
if (call.request.volume_capabilities) {
const result = this.assertCapabilities(call.request.volume_capabilities);
if (result.valid !== true) {
throw new GrpcError(grpc.status.INVALID_ARGUMENT, result.message);
}
}
if (
call.request.capacity_range.required_bytes > 0 &&
call.request.capacity_range.limit_bytes > 0 &&
call.request.capacity_range.required_bytes >
call.request.capacity_range.limit_bytes
) {
throw new GrpcError(
grpc.status.OUT_OF_RANGE,
`required_bytes is greater than limit_bytes`
);
}
let capacity_bytes =
call.request.capacity_range.required_bytes ||
call.request.capacity_range.limit_bytes;
if (!capacity_bytes) {
//should never happen, value must be set
throw new GrpcError(
grpc.status.INVALID_ARGUMENT,
`volume capacity is required (either required_bytes or limit_bytes)`
);
}
// ensure *actual* capacity is not greater than limit
if (
call.request.capacity_range.limit_bytes &&
call.request.capacity_range.limit_bytes > 0 &&
capacity_bytes > call.request.capacity_range.limit_bytes
) {
throw new GrpcError(
grpc.status.OUT_OF_RANGE,
`required volume capacity is greater than limit`
);
}
switch (driver.getDriverShareType()) {
case "nfs":
// TODO: create volume here
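// sketch only, with hypothetical names: DSM's Web API exposes shared-folder
// management (e.g. SYNO.Core.Share), so a real implementation might look like
// await dsmClient.createShare({ name, location: driver.options.synology.volume });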
break;
case "smb":
// TODO: create volume here
break;
case "iscsi":
// TODO: create volume here
break;
default:
// throw an error
break;
}
let volume_context = driver.getVolumeContext(name);
volume_context["provisioner_driver"] = driver.options.driver;
if (driver.options.instance_id) {
volume_context["provisioner_driver_instance_id"] =
driver.options.instance_id;
}
const res = {
volume: {
volume_id: name,
//capacity_bytes: capacity_bytes, // kubernetes currently pukes if capacity is returned as 0
capacity_bytes: 0,
content_source: volume_content_source,
volume_context,
},
};
return res;
}
/**
* DeleteVolume
*
* @param {*} call
*/
async DeleteVolume(call) {
const driver = this;
let name = call.request.volume_id;
if (!name) {
throw new GrpcError(
grpc.status.INVALID_ARGUMENT,
`volume_id is required`
);
}
switch (driver.getDriverShareType()) {
case "nfs":
// TODO: delete volume here
break;
case "smb":
// TODO: delete volume here
break;
case "iscsi":
// TODO: delete volume here
break;
default:
// throw an error
break;
}
return {};
}
/**
*
* @param {*} call
*/
async ControllerExpandVolume(call) {
throw new GrpcError(
grpc.status.UNIMPLEMENTED,
`operation not supported by driver`
);
}
/**
* TODO: consider volume_capabilities?
*
* @param {*} call
*/
async GetCapacity(call) {
throw new GrpcError(
grpc.status.UNIMPLEMENTED,
`operation not supported by driver`
);
}
/**
*
* TODO: check capability to ensure not asking about block volumes
*
* @param {*} call
*/
async ListVolumes(call) {
throw new GrpcError(
grpc.status.UNIMPLEMENTED,
`operation not supported by driver`
);
}
/**
*
* @param {*} call
*/
async ListSnapshots(call) {
throw new GrpcError(
grpc.status.UNIMPLEMENTED,
`operation not supported by driver`
);
}
/**
*
* @param {*} call
*/
async CreateSnapshot(call) {
throw new GrpcError(
grpc.status.UNIMPLEMENTED,
`operation not supported by driver`
);
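// NOTE: everything below is currently unreachable; it is kept as a template
// for a future snapshot implementation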
const driver = this;
// both these are required
let source_volume_id = call.request.source_volume_id;
let name = call.request.name;
if (!source_volume_id) {
throw new GrpcError(
grpc.status.INVALID_ARGUMENT,
`snapshot source_volume_id is required`
);
}
if (!name) {
throw new GrpcError(
grpc.status.INVALID_ARGUMENT,
`snapshot name is required`
);
}
driver.ctx.logger.verbose("requested snapshot name: %s", name);
let invalid_chars = name.match(/[^a-z0-9_\-:.+]+/gi);
if (invalid_chars) {
invalid_chars = String.prototype.concat(
...new Set(invalid_chars.join(""))
);
throw new GrpcError(
grpc.status.INVALID_ARGUMENT,
`snapshot name contains invalid characters: ${invalid_chars}`
);
}
// TODO: create snapshot here and assign the resulting id
let snapshot_id; // placeholder: must be set by a real implementation before returning
return {
snapshot: {
/**
* The purpose of this field is to give CO guidance on how much space
* is needed to create a volume from this snapshot.
*/
size_bytes: 0,
snapshot_id,
source_volume_id: source_volume_id,
//https://github.com/protocolbuffers/protobuf/blob/master/src/google/protobuf/timestamp.proto
creation_time: {
seconds: Math.round(new Date().getTime() / 1000),
nanos: 0,
},
ready_to_use: true,
},
};
}
/**
* In addition, if clones have been created from a snapshot, then they must
* be destroyed before the snapshot can be destroyed.
*
* @param {*} call
*/
async DeleteSnapshot(call) {
throw new GrpcError(
grpc.status.UNIMPLEMENTED,
`operation not supported by driver`
);
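// NOTE: everything below is currently unreachable; kept as a template for a
// future implementation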
const driver = this;
const snapshot_id = call.request.snapshot_id;
if (!snapshot_id) {
throw new GrpcError(
grpc.status.INVALID_ARGUMENT,
`snapshot_id is required`
);
}
// TODO: delete snapshot here
return {};
}
/**
*
* @param {*} call
*/
async ValidateVolumeCapabilities(call) {
const driver = this;
const result = this.assertCapabilities(call.request.volume_capabilities);
if (result.valid !== true) {
return { message: result.message };
}
return {
confirmed: {
volume_context: call.request.volume_context,
volume_capabilities: call.request.volume_capabilities, // TODO: this is a bit crude, should return *ALL* capabilities, not just what was requested
parameters: call.request.parameters,
},
};
}
}
module.exports.ControllerSynologyDriver = ControllerSynologyDriver;


@@ -1,4 +1,5 @@
const { FreeNASDriver } = require("./freenas");
const { FreeNASSshDriver } = require("./freenas/ssh");
const { FreeNASApiDriver } = require("./freenas/api");
const { ControllerZfsGenericDriver } = require("./controller-zfs-generic");
const {
ZfsLocalEphemeralInlineDriver,
@@ -6,6 +7,7 @@ const {
const { ControllerNfsClientDriver } = require("./controller-nfs-client");
const { ControllerSmbClientDriver } = require("./controller-smb-client");
const { ControllerSynologyDriver } = require("./controller-synology");
const { NodeManualDriver } = require("./node-manual");
function factory(ctx, options) {
@@ -16,7 +18,15 @@ function factory(ctx, options) {
case "truenas-nfs":
case "truenas-smb":
case "truenas-iscsi":
return new FreeNASDriver(ctx, options);
return new FreeNASSshDriver(ctx, options);
case "freenas-api-iscsi":
case "freenas-api-nfs":
case "freenas-api-smb":
return new FreeNASApiDriver(ctx, options);
case "synology-nfs":
case "synology-smb":
case "synology-iscsi":
return new ControllerSynologyDriver(ctx, options);
case "zfs-generic-nfs":
case "zfs-generic-iscsi":
return new ControllerZfsGenericDriver(ctx, options);

src/driver/freenas/api.js (new file, 2798 lines; diff suppressed because it is too large)


@@ -0,0 +1,705 @@
const { GrpcError, grpc } = require("../../../utils/grpc");
const { Zetabyte } = require("../../../utils/zfs");
// used for in-memory cache of the version info
const FREENAS_SYSTEM_VERSION_CACHE_KEY = "freenas:system_version";
class Api {
constructor(client, cache, options = {}) {
this.client = client;
this.cache = cache;
this.options = options;
}
async getHttpClient() {
return this.client;
}
/**
* only here for the helpers
* @returns
*/
async getZetabyte() {
return new Zetabyte({
executor: {
spawn: function () {
throw new Error(
"cannot use the zb implementation to execute zfs commands, must use the http api"
);
},
},
});
}
async findResourceByProperties(endpoint, match) {
if (!match) {
return;
}
if (typeof match === "object" && Object.keys(match).length < 1) {
return;
}
const httpClient = await this.getHttpClient();
let target;
let page = 0;
let lastResponse;
// loop and find target
let queryParams = {};
queryParams.limit = 100;
queryParams.offset = 0;
while (!target) {
//Content-Range: items 0-2/3 (full set)
//Content-Range: items 0--1/3 (invalid offset)
if (queryParams.hasOwnProperty("offset")) {
queryParams.offset = queryParams.limit * page;
}
// crude stoppage attempt
let response = await httpClient.get(endpoint, queryParams);
if (lastResponse) {
if (JSON.stringify(lastResponse) == JSON.stringify(response)) {
break;
}
}
lastResponse = response;
if (response.statusCode == 200) {
if (response.body.length < 1) {
break;
}
response.body.some((i) => {
let isMatch = true;
if (typeof match === "function") {
isMatch = match(i);
} else {
for (let property in match) {
if (match[property] != i[property]) {
isMatch = false;
break;
}
}
}
if (isMatch) {
target = i;
return true;
}
return false;
});
} else {
throw new Error(
"FreeNAS http error - code: " +
response.statusCode +
" body: " +
JSON.stringify(response.body)
);
}
page++;
}
return target;
}
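// usage sketches (endpoints and properties are illustrative):
// const target = await api.findResourceByProperties("/iscsi/target", {
//   name: "pvc-123",
// });
// const share = await api.findResourceByProperties("/sharing/nfs", (i) =>
//   i.paths.includes("/mnt/tank/k8s/pvc-123")
// );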
async getApiVersion() {
const systemVersion = await this.getSystemVersion();
if (systemVersion.v2) {
if ((await this.getSystemVersionMajorMinor()) == 11.2) {
return 1;
}
return 2;
}
return 1;
}
async getIsFreeNAS() {
const systemVersion = await this.getSystemVersion();
let version;
if (systemVersion.v2) {
version = systemVersion.v2;
} else {
version = systemVersion.v1.fullversion;
}
if (version.toLowerCase().includes("freenas")) {
return true;
}
return false;
}
async getIsTrueNAS() {
const systemVersion = await this.getSystemVersion();
let version;
if (systemVersion.v2) {
version = systemVersion.v2;
} else {
version = systemVersion.v1.fullversion;
}
if (version.toLowerCase().includes("truenas")) {
return true;
}
return false;
}
async getIsScale() {
const systemVersion = await this.getSystemVersion();
if (systemVersion.v2 && systemVersion.v2.toLowerCase().includes("scale")) {
return true;
}
return false;
}
async getSystemVersionMajorMinor() {
const systemVersion = await this.getSystemVersion();
let parts;
let parts_i;
let version;
/*
systemVersion.v2 = "FreeNAS-11.2-U5";
systemVersion.v2 = "TrueNAS-SCALE-20.11-MASTER-20201127-092915";
systemVersion.v1 = {
fullversion: "FreeNAS-9.3-STABLE-201503200528",
fullversion: "FreeNAS-11.2-U5 (c129415c52)",
};
systemVersion.v2 = null;
*/
if (systemVersion.v2) {
version = systemVersion.v2;
} else {
version = systemVersion.v1.fullversion;
}
if (version) {
parts = version.split("-");
parts_i = [];
parts.forEach((value) => {
let i = value.replace(/[^\d.]/g, "");
if (i.length > 0) {
parts_i.push(i);
}
});
// join and resplit to deal with single elements which contain a decimal
parts_i = parts_i.join(".").split(".");
parts_i.splice(2);
return parts_i.join(".");
}
}
}
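// worked example: "TrueNAS-SCALE-20.11-MASTER-20201127-092915"
// split("-") -> ["TrueNAS", "SCALE", "20.11", "MASTER", "20201127", "092915"]
// keep digits/dots only -> ["20.11", "20201127", "092915"]
// join(".").split(".") -> ["20", "11", "20201127", "092915"]
// splice(2) -> ["20", "11"] -> returns "20.11"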
async getSystemVersionMajor() {
const majorMinor = await this.getSystemVersionMajorMinor();
return majorMinor.split(".")[0];
}
async setVersionInfoCache(versionInfo) {
await this.cache.set(
FREENAS_SYSTEM_VERSION_CACHE_KEY,
versionInfo,
60 * 1000
);
}
async getSystemVersion() {
let cacheData = await this.cache.get(FREENAS_SYSTEM_VERSION_CACHE_KEY);
if (cacheData) {
return cacheData;
}
const httpClient = await this.getHttpClient(false);
const endpoint = "/system/version/";
let response;
const startApiVersion = httpClient.getApiVersion();
const versionInfo = {};
const versionErrors = {};
const versionResponses = {};
httpClient.setApiVersion(2);
/**
* FreeNAS-11.2-U5
* TrueNAS-12.0-RELEASE
* TrueNAS-SCALE-20.11-MASTER-20201127-092915
*/
try {
response = await httpClient.get(endpoint);
versionResponses.v2 = response;
if (response.statusCode == 200) {
versionInfo.v2 = response.body;
// return immediately to save on resources and silly requests
await this.setVersionInfoCache(versionInfo);
// reset apiVersion
httpClient.setApiVersion(startApiVersion);
return versionInfo;
}
} catch (e) {
// if more info is needed use e.stack
versionErrors.v2 = e.toString();
}
httpClient.setApiVersion(1);
/**
* {"fullversion": "FreeNAS-9.3-STABLE-201503200528", "name": "FreeNAS", "version": "9.3"}
* {"fullversion": "FreeNAS-11.2-U5 (c129415c52)", "name": "FreeNAS", "version": ""}
*/
try {
response = await httpClient.get(endpoint);
versionResponses.v1 = response;
if (response.statusCode == 200 && IsJsonString(response.body)) {
versionInfo.v1 = response.body;
await this.setVersionInfoCache(versionInfo);
// reset apiVersion
httpClient.setApiVersion(startApiVersion);
return versionInfo;
}
} catch (e) {
// if more info is needed use e.stack
versionErrors.v1 = e.toString();
}
// throw error if cannot get v1 or v2 data
// likely bad creds/url
throw new GrpcError(
grpc.status.UNKNOWN,
`FreeNAS error getting system version info: ${JSON.stringify({
errors: versionErrors,
responses: versionResponses,
})}`
);
}
getIsUserProperty(property) {
if (property.includes(":")) {
return true;
}
return false;
}
getUserProperties(properties) {
let user_properties = {};
for (const property in properties) {
if (this.getIsUserProperty(property)) {
user_properties[property] = properties[property];
}
}
return user_properties;
}
getSystemProperties(properties) {
let system_properties = {};
for (const property in properties) {
if (!this.getIsUserProperty(property)) {
system_properties[property] = properties[property];
}
}
return system_properties;
}
getPropertiesKeyValueArray(properties) {
let arr = [];
for (const property in properties) {
arr.push({ key: property, value: properties[property] });
}
return arr;
}
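// worked example: ZFS user properties are namespaced with a colon, so
// { "democratic-csi:managed": "true", compression: "lz4" } splits into
// user properties { "democratic-csi:managed": "true" } and system
// properties { compression: "lz4" }; getPropertiesKeyValueArray() then
// yields [{ key: "democratic-csi:managed", value: "true" }]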
async DatasetCreate(datasetName, data) {
const httpClient = await this.getHttpClient(false);
let response;
let endpoint;
data.name = datasetName;
endpoint = "/pool/dataset";
response = await httpClient.post(endpoint, data);
if (response.statusCode == 200) {
return;
}
if (
response.statusCode == 422 &&
JSON.stringify(response.body).includes("already exists")
) {
return;
}
throw new Error(JSON.stringify(response.body));
}
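// usage sketch (payload fields beyond `name` are illustrative; the v2
// /pool/dataset endpoint accepts e.g. a type of FILESYSTEM or VOLUME):
// await api.DatasetCreate("tank/k8s/pvc-123", { type: "FILESYSTEM" });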
/**
*
* @param {*} datasetName
* @param {*} data
* @returns
*/
async DatasetDelete(datasetName, data) {
const httpClient = await this.getHttpClient(false);
let response;
let endpoint;
endpoint = `/pool/dataset/id/${encodeURIComponent(datasetName)}`;
response = await httpClient.delete(endpoint, data);
if (response.statusCode == 200) {
return;
}
if (
response.statusCode == 422 &&
JSON.stringify(response.body).includes("does not exist")
) {
return;
}
throw new Error(JSON.stringify(response.body));
}
async DatasetSet(datasetName, properties) {
const httpClient = await this.getHttpClient(false);
let response;
let endpoint;
endpoint = `/pool/dataset/id/${encodeURIComponent(datasetName)}`;
response = await httpClient.put(endpoint, {
...this.getSystemProperties(properties),
user_properties_update: this.getPropertiesKeyValueArray(
this.getUserProperties(properties)
),
});
if (response.statusCode == 200) {
return;
}
throw new Error(JSON.stringify(response.body));
}
async DatasetInherit(datasetName, property) {
const httpClient = await this.getHttpClient(false);
let response;
let endpoint;
let system_properties = {};
let user_properties_update = [];
const isUserProperty = this.getIsUserProperty(property);
if (isUserProperty) {
user_properties_update = [{ key: property, remove: true }];
} else {
system_properties[property] = "INHERIT";
}
endpoint = `/pool/dataset/id/${encodeURIComponent(datasetName)}`;
response = await httpClient.put(endpoint, {
...system_properties,
user_properties_update,
});
if (response.statusCode == 200) {
return;
}
throw new Error(JSON.stringify(response.body));
}
/**
*
* zfs get -Hp all tank/k8s/test/PVC-111
*
* @param {*} datasetName
* @param {*} properties
* @returns
*/
async DatasetGet(datasetName, properties) {
const httpClient = await this.getHttpClient(false);
let response;
let endpoint;
endpoint = `/pool/dataset/id/${encodeURIComponent(datasetName)}`;
response = await httpClient.get(endpoint);
if (response.statusCode == 200) {
let res = {};
for (const property of properties) {
let p;
if (response.body.hasOwnProperty(property)) {
p = response.body[property];
} else if (response.body.user_properties.hasOwnProperty(property)) {
p = response.body.user_properties[property];
} else {
p = {
value: "-",
rawvalue: "-",
source: "-",
};
}
if (typeof p === "object" && p !== null) {
// nothing, leave as is
} else {
p = {
value: p,
rawvalue: p,
};
}
res[property] = p;
}
return res;
}
if (response.statusCode == 404) {
throw new Error("dataset does not exist");
}
throw new Error(JSON.stringify(response.body));
}
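// usage sketch (property names illustrative):
// const props = await api.DatasetGet("tank/k8s/test/PVC-111", ["used"]);
// props.used.rawvalue -> raw value as reported by the API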
/**
*
* zfs get -Hp all tank/k8s/test/PVC-111
*
* @param {*} snapshotName
* @param {*} properties
* @returns
*/
async SnapshotGet(snapshotName, properties) {
const httpClient = await this.getHttpClient(false);
let response;
let endpoint;
endpoint = `/zfs/snapshot/id/${encodeURIComponent(snapshotName)}`;
response = await httpClient.get(endpoint);
if (response.statusCode == 200) {
let res = {};
for (const property of properties) {
let p;
if (response.body.hasOwnProperty(property)) {
p = response.body[property];
} else if (response.body.properties.hasOwnProperty(property)) {
p = response.body.properties[property];
} else {
p = {
value: "-",
rawvalue: "-",
source: "-",
};
}
if (typeof p === "object" && p !== null) {
// nothing, leave as is
} else {
p = {
value: p,
rawvalue: p,
};
}
res[property] = p;
}
return res;
}
if (response.statusCode == 404) {
throw new Error("dataset does not exist");
}
throw new Error(JSON.stringify(response.body));
}
async SnapshotCreate(snapshotName, data = {}) {
const httpClient = await this.getHttpClient(false);
const zb = await this.getZetabyte();
let response;
let endpoint;
const dataset = zb.helpers.extractDatasetName(snapshotName);
const snapshot = zb.helpers.extractSnapshotName(snapshotName);
data.dataset = dataset;
data.name = snapshot;
endpoint = "/zfs/snapshot";
response = await httpClient.post(endpoint, data);
if (response.statusCode == 200) {
return;
}
if (
response.statusCode == 422 &&
JSON.stringify(response.body).includes("already exists")
) {
return;
}
throw new Error(JSON.stringify(response.body));
}
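// usage sketch: the snapshot path is split into dataset and snapshot name:
// await api.SnapshotCreate("tank/k8s/pvc-123@snap-1");
// -> POST /zfs/snapshot { dataset: "tank/k8s/pvc-123", name: "snap-1" }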
async SnapshotDelete(snapshotName, data = {}) {
const httpClient = await this.getHttpClient(false);
const zb = await this.getZetabyte();
let response;
let endpoint;
endpoint = `/zfs/snapshot/id/${encodeURIComponent(snapshotName)}`;
response = await httpClient.delete(endpoint, data);
if (response.statusCode == 200) {
return;
}
if (response.statusCode == 404) {
return;
}
if (
response.statusCode == 422 &&
JSON.stringify(response.body).includes("not found")
) {
return;
}
throw new Error(JSON.stringify(response.body));
}
async CloneCreate(snapshotName, datasetName, data = {}) {
const httpClient = await this.getHttpClient(false);
const zb = await this.getZetabyte();
let response;
let endpoint;
data.snapshot = snapshotName;
data.dataset_dst = datasetName;
endpoint = "/zfs/snapshot/clone";
response = await httpClient.post(endpoint, data);
if (response.statusCode == 200) {
return;
}
if (
response.statusCode == 422 &&
JSON.stringify(response.body).includes("already exists")
) {
return;
}
throw new Error(JSON.stringify(response.body));
}
// get all dataset snapshots
// https://github.com/truenas/middleware/pull/6934
// then use core.bulk to delete all
async ReplicationRunOnetime(data) {
const httpClient = await this.getHttpClient(false);
let response;
let endpoint;
endpoint = "/replication/run_onetime";
response = await httpClient.post(endpoint, data);
// 200 means the 'job' was accepted only
// must continue to check the status of the job to know when it has finished and if it was successful
// /core/get_jobs [["id", "=", jobidhere]]
if (response.statusCode == 200) {
return response.body;
}
throw new Error(JSON.stringify(response.body));
}
async CoreGetJobs(data) {
const httpClient = await this.getHttpClient(false);
let response;
let endpoint;
endpoint = "/core/get_jobs";
response = await httpClient.get(endpoint, data);
// returns the list of jobs matching the filter; poll this to learn whether a
// previously submitted job (e.g. from ReplicationRunOnetime) has finished and
// whether it succeeded
// /core/get_jobs [["id", "=", jobidhere]]
if (response.statusCode == 200) {
return response.body;
}
throw new Error(JSON.stringify(response.body));
}
/**
*
* @param {*} data
*/
async FilesystemSetperm(data) {
/*
{
"path": "string",
"mode": "string",
"uid": 0,
"gid": 0,
"options": {
"stripacl": false,
"recursive": false,
"traverse": false
}
}
*/
const httpClient = await this.getHttpClient(false);
let response;
let endpoint;
endpoint = `/filesystem/setperm`;
response = await httpClient.post(endpoint, data);
if (response.statusCode == 200) {
return;
}
throw new Error(JSON.stringify(response.body));
}
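// usage sketch (path/mode illustrative):
// await api.FilesystemSetperm({ path: "/mnt/tank/k8s/pvc-123", mode: "0777" });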
}
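// sketch: poll /core/get_jobs until a submitted job (e.g. one returned by
// ReplicationRunOnetime) reaches a terminal state; assumes CoreGetJobs()
// returns an array of job objects, that the endpoint accepts an { id } filter,
// and that terminal middleware job states are SUCCESS, FAILED, and ABORTED
async function coreWaitForJob(api, jobId, intervalMs = 2000) {
  for (;;) {
    const jobs = await api.CoreGetJobs({ id: jobId });
    const job = (jobs || []).find((j) => j.id == jobId);
    if (job && ["SUCCESS", "FAILED", "ABORTED"].includes(job.state)) {
      return job;
    }
    // job still queued/running; wait before asking again
    await new Promise((resolve) => setTimeout(resolve, intervalMs));
  }
}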
function IsJsonString(str) {
try {
JSON.parse(str);
} catch (e) {
return false;
}
return true;
}
module.exports.Api = Api;