commit 13e24d3bdd8f0a373c14c3777e3d21037da28998 Author: Travis Glenn Hansen Date: Thu Nov 21 14:59:57 2019 -0700 initial commit diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..26ff709 --- /dev/null +++ b/.gitignore @@ -0,0 +1,2 @@ +node_modules +dev diff --git a/.travis.yml b/.travis.yml new file mode 100644 index 0000000..3ee8da5 --- /dev/null +++ b/.travis.yml @@ -0,0 +1,23 @@ +# if: tag IS present +language: node_js +node_js: +- "12" +sudo: required +services: +- docker +install: true +script: true +deploy: +- provider: script + script: bash .travis/docker-release.sh + skip_cleanup: true + on: + repo: democratic-csi/democratic-csi + all_branches: true + condition: $TRAVIS_BRANCH =~ ^master|next$ +- provider: script + script: bash .travis/docker-release.sh + skip_cleanup: true + on: + repo: democratic-csi/democratic-csi + tags: true diff --git a/.travis/docker-release.sh b/.travis/docker-release.sh new file mode 100755 index 0000000..1c36991 --- /dev/null +++ b/.travis/docker-release.sh @@ -0,0 +1,22 @@ +#!/bin/bash + +echo "$DOCKER_PASSWORD" | docker login -u "$DOCKER_USERNAME" --password-stdin + +export DOCKER_ORG="democraticcsi" +export DOCKER_PROJECT="democratic-csi" +export DOCKER_REPO="${DOCKER_ORG}/${DOCKER_PROJECT}" + +if [[ -n "${TRAVIS_TAG}" ]];then + docker build --pull -t ${DOCKER_REPO}:${TRAVIS_TAG} . + docker push ${DOCKER_REPO}:${TRAVIS_TAG} +elif [[ -n "${TRAVIS_BRANCH}" ]];then + if [[ "${TRAVIS_BRANCH}" == "master" ]];then + docker build --pull -t ${DOCKER_REPO}:latest . + docker push ${DOCKER_REPO}:latest + else + docker build --pull -t ${DOCKER_REPO}:${TRAVIS_BRANCH} . 
+ docker push ${DOCKER_REPO}:${TRAVIS_BRANCH} + fi +else + : +fi diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 0000000..ba94a96 --- /dev/null +++ b/Dockerfile @@ -0,0 +1,56 @@ +FROM debian:10-slim + +RUN apt-get update && apt-get install -y locales && rm -rf /var/lib/apt/lists/* \ + && localedef -i en_US -c -f UTF-8 -A /usr/share/locale/locale.alias en_US.UTF-8 + +ENV LANG en_US.utf8 + +# install node +ENV NODE_VERSION=v12.13.1 +ENV NODE_DISTRO=linux-x64 + +RUN apt-get update && \ + apt-get install -y wget xz-utils && \ + wget https://nodejs.org/dist/${NODE_VERSION}/node-${NODE_VERSION}-${NODE_DISTRO}.tar.xz && \ + mkdir -p /usr/local/lib/nodejs && \ + tar -xJvf node-${NODE_VERSION}-${NODE_DISTRO}.tar.xz -C /usr/local/lib/nodejs && \ + rm node-${NODE_VERSION}-${NODE_DISTRO}.tar.xz && \ + rm -rf /var/lib/apt/lists/* + +ENV PATH=/usr/local/lib/nodejs/node-${NODE_VERSION}-${NODE_DISTRO}/bin:$PATH + +# node service requirements +RUN apt-get update && \ + apt-get install -y xfsprogs fatresize dosfstools open-iscsi lsscsi sg3-utils multipath-tools scsitools nfs-common sudo && \ + rm -rf /var/lib/apt/lists/* + +# controller requirements +RUN apt-get update && \ + apt-get install -y ansible && \ + rm -rf /var/lib/apt/lists/* + +# npm requirements +RUN apt-get update && \ + apt-get install -y python make && \ + rm -rf /var/lib/apt/lists/* + +# install wrappers +ADD docker/iscsiadm /usr/local/sbin +RUN chmod +x /usr/local/sbin/iscsiadm + +# Run as a non-root user +RUN useradd --create-home csi \ + && mkdir /home/csi/app \ + && chown -R csi: /home/csi +WORKDIR /home/csi/app +USER csi + +COPY package*.json ./ +RUN npm install + +COPY --chown=csi:csi . . 
+ +USER root + +EXPOSE 50051 +ENTRYPOINT [ "bin/democratic-csi" ] diff --git a/bin/democratic-csi b/bin/democratic-csi new file mode 100755 index 0000000..bf6072e --- /dev/null +++ b/bin/democratic-csi @@ -0,0 +1,250 @@ +#!/usr/bin/env -S node --nouse-idle-notification --expose-gc --max-old-space-size=8192 + +const yaml = require("js-yaml"); +const fs = require("fs"); + +let options; +const args = require("yargs") + .env("DEMOCRATIC_CSI") + .scriptName("democratic-csi") + .usage("$0 [options]") + .option("driver", { + alias: "d", + describe: "driver", + choices: ["freenas-nfs", "freenas-iscsi"] + }) + .demandOption(["driver"], "driver is required") + .option("driver-config-file", { + describe: "provide a path to driver config file", + config: true, + configParser: path => { + try { + options = JSON.parse(fs.readFileSync(path, "utf-8")); + return true; + } catch (e) {} + + try { + options = yaml.safeLoad(fs.readFileSync(path, "utf8")); + return true; + } catch (e) {} + + throw new Error("failed parsing config file: " + path); + } + }) + .demandOption(["driver-config-file"], "driver-config-file is required") + .option("log-level", { + describe: "log level", + choices: ["error", "warn", "info", "verbose", "debug", "silly"] + }) + .option("csi-version", { + describe: "versin of the csi spec to load", + choices: ["0.2.0", "0.3.0", "1.0.0", "1.1.0", "1.2.0"] + }) + .demandOption(["csi-version"], "csi-version is required") + .option("csi-name", { + describe: "name to use for driver registration" + }) + .demandOption(["csi-name"], "csi-name is required") + .option("csi-mode", { + describe: "mode of the controller", + choices: ["controller", "node"], + type: "array", + default: ["controller", "node"] + }) + .demandOption(["csi-mode"], "csi-mode is required") + .option("server-address", { + describe: "listen address for the server", + default: "0.0.0.0" + }) + .option("server-port", { + describe: "listen port for the server", + default: 50051, + type: "number" + }) + 
.version() + .help().argv; + +const package = require("../package.json"); +args.version = package.version; + +const grpc = require("grpc"); +const protoLoader = require("@grpc/proto-loader"); +const LRU = require("lru-cache"); +const cache = new LRU({ max: 500 }); +const { logger } = require("../src/utils/logger"); +if (args.logLevel) { + logger.level = args.logLevel; +} +const csiVersion = process.env.CSI_VERSION || "1.1.0"; +const PROTO_PATH = __dirname + "/../csi_proto/csi-v" + csiVersion + ".proto"; + +// Suggested options for similarity to existing grpc.load behavior +const packageDefinition = protoLoader.loadSync(PROTO_PATH, { + keepCase: true, + longs: String, + enums: String, + defaults: true, + oneofs: true +}); + +const protoDescriptor = grpc.loadPackageDefinition(packageDefinition); +const csi = protoDescriptor.csi.v1; + +// include available drivers +const { FreeNASDriver } = require("../src/driver/freenas"); + +logger.info("initializing csi driver: %s", args.driver); + +let driver; +switch (args.driver) { + case "freenas-nfs": + case "freenas-iscsi": + driver = new FreeNASDriver({ logger, args, cache, package }, options); + break; + default: + logger.error("invalid csi driver: %s", args.driver); + break; +} + +async function requestHandlerProxy(call, callback, serviceMethodName) { + try { + logger.debug( + "new request - driver: %s method: %s call: %j", + driver.constructor.name, + serviceMethodName, + call + ); + //const response = await handler.call(driver, call); + const response = await driver[serviceMethodName](call); + logger.debug( + "new response - driver: %s method: %s response: %j", + driver.constructor.name, + serviceMethodName, + response + ); + callback(null, response); + } catch (e) { + logger.error( + "handler error - driver: %s method: %s error: %s", + driver.constructor.name, + serviceMethodName, + JSON.stringify(e) + ); + console.log(e); + if (e.name == "GrpcError") { + callback(e); + } else { + // TODO: only show real error string in 
development mode + const message = true + ? e.toString() + : "unknown error, please inspect service logs"; + callback({ code: grpc.status.INTERNAL, message }); + } + } +} + +function getServer() { + var server = new grpc.Server(); + + // Identity Service + server.addService(csi.Identity.service, { + async GetPluginInfo(call, callback) { + requestHandlerProxy(call, callback, arguments.callee.name); + }, + async GetPluginCapabilities(call, callback) { + requestHandlerProxy(call, callback, arguments.callee.name); + }, + async Probe(call, callback) { + requestHandlerProxy(call, callback, arguments.callee.name); + } + }); + + // Controller Service + if (args.csiMode.includes("controller")) { + server.addService(csi.Controller.service, { + async CreateVolume(call, callback) { + requestHandlerProxy(call, callback, arguments.callee.name); + }, + async DeleteVolume(call, callback) { + requestHandlerProxy(call, callback, arguments.callee.name); + }, + async ControllerPublishVolume(call, callback) { + requestHandlerProxy(call, callback, arguments.callee.name); + }, + async ControllerUnpublishVolume(call, callback) { + requestHandlerProxy(call, callback, arguments.callee.name); + }, + async ValidateVolumeCapabilities(call, callback) { + requestHandlerProxy(call, callback, arguments.callee.name); + }, + async ListVolumes(call, callback) { + requestHandlerProxy(call, callback, arguments.callee.name); + }, + async GetCapacity(call, callback) { + requestHandlerProxy(call, callback, arguments.callee.name); + }, + async ControllerGetCapabilities(call, callback) { + requestHandlerProxy(call, callback, arguments.callee.name); + }, + async CreateSnapshot(call, callback) { + requestHandlerProxy(call, callback, arguments.callee.name); + }, + async DeleteSnapshot(call, callback) { + requestHandlerProxy(call, callback, arguments.callee.name); + }, + async ListSnapshots(call, callback) { + requestHandlerProxy(call, callback, arguments.callee.name); + }, + async ControllerExpandVolume(call, 
callback) { + requestHandlerProxy(call, callback, arguments.callee.name); + } + }); + } + + // Node Service + if (args.csiMode.includes("node")) { + server.addService(csi.Node.service, { + async NodeStageVolume(call, callback) { + requestHandlerProxy(call, callback, arguments.callee.name); + }, + async NodeUnstageVolume(call, callback) { + requestHandlerProxy(call, callback, arguments.callee.name); + }, + async NodePublishVolume(call, callback) { + requestHandlerProxy(call, callback, arguments.callee.name); + }, + async NodeUnpublishVolume(call, callback) { + requestHandlerProxy(call, callback, arguments.callee.name); + }, + async NodeGetVolumeStats(call, callback) { + requestHandlerProxy(call, callback, arguments.callee.name); + }, + async NodeExpandVolume(call, callback) { + requestHandlerProxy(call, callback, arguments.callee.name); + }, + async NodeGetCapabilities(call, callback) { + requestHandlerProxy(call, callback, arguments.callee.name); + }, + async NodeGetInfo(call, callback) { + requestHandlerProxy(call, callback, arguments.callee.name); + } + }); + } + + return server; +} + +// https://grpc.github.io/grpc/node/grpc.Server.html +const csiServer = getServer(); +let bindAddress = `${args.serverAddress}:${args.serverPort}`; +logger.info( + "starting csi server - name: %s, version: %s, driver: %s, mode: %s, csi version: %s, address: %s", + args.csiName, + args.version, + args.driver, + args.csiMode.join(","), + args.csiVersion, + bindAddress +); +csiServer.bind(bindAddress, grpc.ServerCredentials.createInsecure()); +csiServer.start(); diff --git a/csi_proto/csi-v0.2.0.proto b/csi_proto/csi-v0.2.0.proto new file mode 100644 index 0000000..bd8a3e3 --- /dev/null +++ b/csi_proto/csi-v0.2.0.proto @@ -0,0 +1,673 @@ +syntax = "proto3"; +package csi.v0; + +option go_package = "csi"; +service Identity { + rpc GetPluginInfo(GetPluginInfoRequest) + returns (GetPluginInfoResponse) {} + + rpc GetPluginCapabilities(GetPluginCapabilitiesRequest) + returns 
(GetPluginCapabilitiesResponse) {} + + rpc Probe (ProbeRequest) + returns (ProbeResponse) {} +} + +service Controller { + rpc CreateVolume (CreateVolumeRequest) + returns (CreateVolumeResponse) {} + + rpc DeleteVolume (DeleteVolumeRequest) + returns (DeleteVolumeResponse) {} + + rpc ControllerPublishVolume (ControllerPublishVolumeRequest) + returns (ControllerPublishVolumeResponse) {} + + rpc ControllerUnpublishVolume (ControllerUnpublishVolumeRequest) + returns (ControllerUnpublishVolumeResponse) {} + + rpc ValidateVolumeCapabilities (ValidateVolumeCapabilitiesRequest) + returns (ValidateVolumeCapabilitiesResponse) {} + + rpc ListVolumes (ListVolumesRequest) + returns (ListVolumesResponse) {} + + rpc GetCapacity (GetCapacityRequest) + returns (GetCapacityResponse) {} + + rpc ControllerGetCapabilities (ControllerGetCapabilitiesRequest) + returns (ControllerGetCapabilitiesResponse) {} +} + +service Node { + rpc NodeStageVolume (NodeStageVolumeRequest) + returns (NodeStageVolumeResponse) {} + + rpc NodeUnstageVolume (NodeUnstageVolumeRequest) + returns (NodeUnstageVolumeResponse) {} + + rpc NodePublishVolume (NodePublishVolumeRequest) + returns (NodePublishVolumeResponse) {} + + rpc NodeUnpublishVolume (NodeUnpublishVolumeRequest) + returns (NodeUnpublishVolumeResponse) {} + + rpc NodeGetId (NodeGetIdRequest) + returns (NodeGetIdResponse) {} + + rpc NodeGetCapabilities (NodeGetCapabilitiesRequest) + returns (NodeGetCapabilitiesResponse) {} +} +message GetPluginInfoRequest { +} + +message GetPluginInfoResponse { + // The name MUST follow reverse domain name notation format + // (https://en.wikipedia.org/wiki/Reverse_domain_name_notation). + // It SHOULD include the plugin's host company name and the plugin + // name, to minimize the possibility of collisions. It MUST be 63 + // characters or less, beginning and ending with an alphanumeric + // character ([a-z0-9A-Z]) with dashes (-), underscores (_), + // dots (.), and alphanumerics between. This field is REQUIRED. 
+ string name = 1; + + // This field is REQUIRED. Value of this field is opaque to the CO. + string vendor_version = 2; + + // This field is OPTIONAL. Values are opaque to the CO. + map manifest = 3; +} +message GetPluginCapabilitiesRequest { +} + +message GetPluginCapabilitiesResponse { + // All the capabilities that the controller service supports. This + // field is OPTIONAL. + repeated PluginCapability capabilities = 2; +} + +// Specifies a capability of the plugin. +message PluginCapability { + message Service { + enum Type { + UNKNOWN = 0; + + // CONTROLLER_SERVICE indicates that the Plugin provides RPCs for + // the ControllerService. Plugins SHOULD provide this capability. + // In rare cases certain plugins may wish to omit the + // ControllerService entirely from their implementation, but such + // SHOULD NOT be the common case. + // The presence of this capability determines whether the CO will + // attempt to invoke the REQUIRED ControllerService RPCs, as well + // as specific RPCs as indicated by ControllerGetCapabilities. + CONTROLLER_SERVICE = 1; + } + Type type = 1; + } + + oneof type { + // Service that the plugin supports. + Service service = 1; + } +} +message ProbeRequest { +} + +message ProbeResponse { + // Intentionally empty. +} +message CreateVolumeRequest { + // The suggested name for the storage space. This field is REQUIRED. + // It serves two purposes: + // 1) Idempotency - This name is generated by the CO to achieve + // idempotency. If `CreateVolume` fails, the volume may or may not + // be provisioned. In this case, the CO may call `CreateVolume` + // again, with the same name, to ensure the volume exists. The + // Plugin should ensure that multiple `CreateVolume` calls for the + // same name do not result in more than one piece of storage + // provisioned corresponding to that name. If a Plugin is unable to + // enforce idempotency, the CO's error recovery logic could result + // in multiple (unused) volumes being provisioned. 
+ // 2) Suggested name - Some storage systems allow callers to specify + // an identifier by which to refer to the newly provisioned + // storage. If a storage system supports this, it can optionally + // use this name as the identifier for the new volume. + string name = 1; + + // This field is OPTIONAL. This allows the CO to specify the capacity + // requirement of the volume to be provisioned. If not specified, the + // Plugin MAY choose an implementation-defined capacity range. + CapacityRange capacity_range = 2; + + // The capabilities that the provisioned volume MUST have: the Plugin + // MUST provision a volume that could satisfy ALL of the + // capabilities specified in this list. The Plugin MUST assume that + // the CO MAY use the provisioned volume later with ANY of the + // capabilities specified in this list. This also enables the CO to do + // early validation: if ANY of the specified volume capabilities are + // not supported by the Plugin, the call SHALL fail. This field is + // REQUIRED. + repeated VolumeCapability volume_capabilities = 3; + + // Plugin specific parameters passed in as opaque key-value pairs. + // This field is OPTIONAL. The Plugin is responsible for parsing and + // validating these parameters. COs will treat these as opaque. + map parameters = 4; + + // Secrets required by plugin to complete volume creation request. + // A secret is a string to string map where the key identifies the + // name of the secret (e.g. "username" or "password"), and the value + // contains the secret data (e.g. "bob" or "abc123"). + // Each key MUST consist of alphanumeric characters, '-', '_' or '.'. + // Each value MUST contain a valid string. An SP MAY choose to accept + // binary (non-string) data by using a binary-to-text encoding scheme, + // like base64. + // An SP SHALL advertise the requirements for required secret keys and + // values in documentation. + // CO SHALL permit passing through the required secrets. 
+ // A CO MAY pass the same secrets to all RPCs, therefore the keys for + // all unique secrets that an SP expects must be unique across all CSI + // operations. + // This information is sensitive and MUST be treated as such (not + // logged, etc.) by the CO. + // This field is OPTIONAL. + map controller_create_secrets = 5; +} + +message CreateVolumeResponse { + // Contains all attributes of the newly created volume that are + // relevant to the CO along with information required by the Plugin + // to uniquely identify the volume. This field is REQUIRED. + Volume volume = 1; +} + +// Specify a capability of a volume. +message VolumeCapability { + // Indicate that the volume will be accessed via the block device API. + message BlockVolume { + // Intentionally empty, for now. + } + + // Indicate that the volume will be accessed via the filesystem API. + message MountVolume { + // The filesystem type. This field is OPTIONAL. + // An empty string is equal to an unspecified field value. + string fs_type = 1; + + // The mount options that can be used for the volume. This field is + // OPTIONAL. `mount_flags` MAY contain sensitive information. + // Therefore, the CO and the Plugin MUST NOT leak this information + // to untrusted entities. The total size of this repeated field + // SHALL NOT exceed 4 KiB. + repeated string mount_flags = 2; + } + + // Specify how a volume can be accessed. + message AccessMode { + enum Mode { + UNKNOWN = 0; + + // Can only be published once as read/write on a single node, at + // any given time. + SINGLE_NODE_WRITER = 1; + + // Can only be published once as readonly on a single node, at + // any given time. + SINGLE_NODE_READER_ONLY = 2; + + // Can be published as readonly at multiple nodes simultaneously. + MULTI_NODE_READER_ONLY = 3; + + // Can be published at multiple nodes simultaneously. Only one of + // the node can be used as read/write. The rest will be readonly. 
+ MULTI_NODE_SINGLE_WRITER = 4; + + // Can be published as read/write at multiple nodes + // simultaneously. + MULTI_NODE_MULTI_WRITER = 5; + } + + // This field is REQUIRED. + Mode mode = 1; + } + + // Specifies what API the volume will be accessed using. One of the + // following fields MUST be specified. + oneof access_type { + BlockVolume block = 1; + MountVolume mount = 2; + } + + // This is a REQUIRED field. + AccessMode access_mode = 3; +} + +// The capacity of the storage space in bytes. To specify an exact size, +// `required_bytes` and `limit_bytes` can be set to the same value. At +// least one of the these fields MUST be specified. +message CapacityRange { + // Volume must be at least this big. This field is OPTIONAL. + // A value of 0 is equal to an unspecified field value. + // The value of this field MUST NOT be negative. + int64 required_bytes = 1; + + // Volume must not be bigger than this. This field is OPTIONAL. + // A value of 0 is equal to an unspecified field value. + // The value of this field MUST NOT be negative. + int64 limit_bytes = 2; +} + +// The information about a provisioned volume. +message Volume { + // The capacity of the volume in bytes. This field is OPTIONAL. If not + // set (value of 0), it indicates that the capacity of the volume is + // unknown (e.g., NFS share). + // The value of this field MUST NOT be negative. + int64 capacity_bytes = 1; + + // Contains identity information for the created volume. This field is + // REQUIRED. The identity information will be used by the CO in + // subsequent calls to refer to the provisioned volume. + string id = 2; + + // Attributes reflect static properties of a volume and MUST be passed + // to volume validation and publishing calls. + // Attributes SHALL be opaque to a CO. Attributes SHALL NOT be mutable + // and SHALL be safe for the CO to cache. Attributes SHOULD NOT + // contain sensitive information. Attributes MAY NOT uniquely identify + // a volume. 
A volume uniquely identified by `id` SHALL always report + // the same attributes. This field is OPTIONAL and when present MUST + // be passed to volume validation and publishing calls. + map attributes = 3; +} +message DeleteVolumeRequest { + // The ID of the volume to be deprovisioned. + // This field is REQUIRED. + string volume_id = 1; + + // Secrets required by plugin to complete volume deletion request. + // A secret is a string to string map where the key identifies the + // name of the secret (e.g. "username" or "password"), and the value + // contains the secret data (e.g. "bob" or "abc123"). + // Each key MUST consist of alphanumeric characters, '-', '_' or '.'. + // Each value MUST contain a valid string. An SP MAY choose to accept + // binary (non-string) data by using a binary-to-text encoding scheme, + // like base64. + // An SP SHALL advertise the requirements for required secret keys and + // values in documentation. + // CO SHALL permit passing through the required secrets. + // A CO MAY pass the same secrets to all RPCs, therefore the keys for + // all unique secrets that an SP expects must be unique across all CSI + // operations. + // This information is sensitive and MUST be treated as such (not + // logged, etc.) by the CO. + // This field is OPTIONAL. + map controller_delete_secrets = 2; +} + +message DeleteVolumeResponse { +} +message ControllerPublishVolumeRequest { + // The ID of the volume to be used on a node. + // This field is REQUIRED. + string volume_id = 1; + + // The ID of the node. This field is REQUIRED. The CO SHALL set this + // field to match the node ID returned by `NodeGetId`. + string node_id = 2; + + // The capability of the volume the CO expects the volume to have. + // This is a REQUIRED field. + VolumeCapability volume_capability = 3; + + // Whether to publish the volume in readonly mode. This field is + // REQUIRED. 
+ bool readonly = 4; + + // Secrets required by plugin to complete controller publish volume + // request. + // A secret is a string to string map where the key identifies the + // name of the secret (e.g. "username" or "password"), and the value + // contains the secret data (e.g. "bob" or "abc123"). + // Each key MUST consist of alphanumeric characters, '-', '_' or '.'. + // Each value MUST contain a valid string. An SP MAY choose to accept + // binary (non-string) data by using a binary-to-text encoding scheme, + // like base64. + // An SP SHALL advertise the requirements for required secret keys and + // values in documentation. + // CO SHALL permit passing through the required secrets. + // A CO MAY pass the same secrets to all RPCs, therefore the keys for + // all unique secrets that an SP expects must be unique across all CSI + // operations. + // This information is sensitive and MUST be treated as such (not + // logged, etc.) by the CO. + // This field is OPTIONAL. + map controller_publish_secrets = 5; + + // Attributes of the volume to be used on a node. This field is + // OPTIONAL and MUST match the attributes of the Volume identified + // by `volume_id`. + map volume_attributes = 6; +} + +message ControllerPublishVolumeResponse { + // The SP specific information that will be passed to the Plugin in + // the subsequent `NodeStageVolume` or `NodePublishVolume` calls + // for the given volume. + // This information is opaque to the CO. This field is OPTIONAL. + map publish_info = 1; +} +message ControllerUnpublishVolumeRequest { + // The ID of the volume. This field is REQUIRED. + string volume_id = 1; + + // The ID of the node. This field is OPTIONAL. The CO SHOULD set this + // field to match the node ID returned by `NodeGetId` or leave it + // unset. If the value is set, the SP MUST unpublish the volume from + // the specified node. If the value is unset, the SP MUST unpublish + // the volume from all nodes it is published to. 
+ string node_id = 2; + + // Secrets required by plugin to complete controller unpublish volume + // request. This SHOULD be the same secrets passed to the + // ControllerPublishVolume. + // call for the specified volume. + // A secret is a string to string map where the key identifies the + // name of the secret (e.g. "username" or "password"), and the value + // contains the secret data (e.g. "bob" or "abc123"). + // Each key MUST consist of alphanumeric characters, '-', '_' or '.'. + // Each value MUST contain a valid string. An SP MAY choose to accept + // binary (non-string) data by using a binary-to-text encoding scheme, + // like base64. + // An SP SHALL advertise the requirements for required secret keys and + // values in documentation. + // CO SHALL permit passing through the required secrets. + // A CO MAY pass the same secrets to all RPCs, therefore the keys for + // all unique secrets that an SP expects must be unique across all CSI + // operations. + // This information is sensitive and MUST be treated as such (not + // logged, etc.) by the CO. + // This field is OPTIONAL. + map controller_unpublish_secrets = 3; +} + +message ControllerUnpublishVolumeResponse { +} +message ValidateVolumeCapabilitiesRequest { + // The ID of the volume to check. This field is REQUIRED. + string volume_id = 1; + + // The capabilities that the CO wants to check for the volume. This + // call SHALL return "supported" only if all the volume capabilities + // specified below are supported. This field is REQUIRED. + repeated VolumeCapability volume_capabilities = 2; + + // Attributes of the volume to check. This field is OPTIONAL and MUST + // match the attributes of the Volume identified by `volume_id`. + map volume_attributes = 3; +} + +message ValidateVolumeCapabilitiesResponse { + // True if the Plugin supports the specified capabilities for the + // given volume. This field is REQUIRED. + bool supported = 1; + + // Message to the CO if `supported` above is false. 
This field is + // OPTIONAL. + // An empty string is equal to an unspecified field value. + string message = 2; +} +message ListVolumesRequest { + // If specified (non-zero value), the Plugin MUST NOT return more + // entries than this number in the response. If the actual number of + // entries is more than this number, the Plugin MUST set `next_token` + // in the response which can be used to get the next page of entries + // in the subsequent `ListVolumes` call. This field is OPTIONAL. If + // not specified (zero value), it means there is no restriction on the + // number of entries that can be returned. + // The value of this field MUST NOT be negative. + int32 max_entries = 1; + + // A token to specify where to start paginating. Set this field to + // `next_token` returned by a previous `ListVolumes` call to get the + // next page of entries. This field is OPTIONAL. + // An empty string is equal to an unspecified field value. + string starting_token = 2; +} + +message ListVolumesResponse { + message Entry { + Volume volume = 1; + } + + repeated Entry entries = 1; + + // This token allows you to get the next page of entries for + // `ListVolumes` request. If the number of entries is larger than + // `max_entries`, use the `next_token` as a value for the + // `starting_token` field in the next `ListVolumes` request. This + // field is OPTIONAL. + // An empty string is equal to an unspecified field value. + string next_token = 2; +} +message GetCapacityRequest { + // If specified, the Plugin SHALL report the capacity of the storage + // that can be used to provision volumes that satisfy ALL of the + // specified `volume_capabilities`. These are the same + // `volume_capabilities` the CO will use in `CreateVolumeRequest`. + // This field is OPTIONAL. 
+ repeated VolumeCapability volume_capabilities = 1; + + // If specified, the Plugin SHALL report the capacity of the storage + // that can be used to provision volumes with the given Plugin + // specific `parameters`. These are the same `parameters` the CO will + // use in `CreateVolumeRequest`. This field is OPTIONAL. + map parameters = 2; +} + +message GetCapacityResponse { + // The available capacity of the storage that can be used to + // provision volumes. If `volume_capabilities` or `parameters` is + // specified in the request, the Plugin SHALL take those into + // consideration when calculating the available capacity of the + // storage. This field is REQUIRED. + // The value of this field MUST NOT be negative. + int64 available_capacity = 1; +} +message ControllerGetCapabilitiesRequest { +} + +message ControllerGetCapabilitiesResponse { + // All the capabilities that the controller service supports. This + // field is OPTIONAL. + repeated ControllerServiceCapability capabilities = 2; +} + +// Specifies a capability of the controller service. +message ControllerServiceCapability { + message RPC { + enum Type { + UNKNOWN = 0; + CREATE_DELETE_VOLUME = 1; + PUBLISH_UNPUBLISH_VOLUME = 2; + LIST_VOLUMES = 3; + GET_CAPACITY = 4; + } + + Type type = 1; + } + + oneof type { + // RPC that the controller supports. + RPC rpc = 1; + } +} +message NodeStageVolumeRequest { + // The ID of the volume to publish. This field is REQUIRED. + string volume_id = 1; + + // The CO SHALL set this field to the value returned by + // `ControllerPublishVolume` if the corresponding Controller Plugin + // has `PUBLISH_UNPUBLISH_VOLUME` controller capability, and SHALL be + // left unset if the corresponding Controller Plugin does not have + // this capability. This is an OPTIONAL field. + map publish_info = 2; + + // The path to which the volume will be published. It MUST be an + // absolute path in the root filesystem of the process serving this + // request. 
The CO SHALL ensure that there is only one + // staging_target_path per volume. + // This is a REQUIRED field. + string staging_target_path = 3; + + // The capability of the volume the CO expects the volume to have. + // This is a REQUIRED field. + VolumeCapability volume_capability = 4; + + // Secrets required by plugin to complete node stage volume request. + // A secret is a string to string map where the key identifies the + // name of the secret (e.g. "username" or "password"), and the value + // contains the secret data (e.g. "bob" or "abc123"). + // Each key MUST consist of alphanumeric characters, '-', '_' or '.'. + // Each value MUST contain a valid string. An SP MAY choose to accept + // binary (non-string) data by using a binary-to-text encoding scheme, + // like base64. + // An SP SHALL advertise the requirements for required secret keys and + // values in documentation. + // CO SHALL permit passing through the required secrets. + // A CO MAY pass the same secrets to all RPCs, therefore the keys for + // all unique secrets that an SP expects must be unique across all CSI + // operations. + // This information is sensitive and MUST be treated as such (not + // logged, etc.) by the CO. + // This field is OPTIONAL. + map node_stage_secrets = 5; + + // Attributes of the volume to publish. This field is OPTIONAL and + // MUST match the attributes of the VolumeInfo identified by + // `volume_id`. + map volume_attributes = 6; +} + +message NodeStageVolumeResponse { +} +message NodeUnstageVolumeRequest { + // The ID of the volume. This field is REQUIRED. + string volume_id = 1; + + // The path at which the volume was published. It MUST be an absolute + // path in the root filesystem of the process serving this request. + // This is a REQUIRED field. + string staging_target_path = 2; +} + +message NodeUnstageVolumeResponse { +} +message NodePublishVolumeRequest { + // The ID of the volume to publish. This field is REQUIRED. 
+ string volume_id = 1; + + // The CO SHALL set this field to the value returned by + // `ControllerPublishVolume` if the corresponding Controller Plugin + // has `PUBLISH_UNPUBLISH_VOLUME` controller capability, and SHALL be + // left unset if the corresponding Controller Plugin does not have + // this capability. This is an OPTIONAL field. + map publish_info = 2; + + // The path to which the device was mounted by `NodeStageVolume`. + // It MUST be an absolute path in the root filesystem of the process + // serving this request. + // It MUST be set if the Node Plugin implements the + // `STAGE_UNSTAGE_VOLUME` node capability. + // This is an OPTIONAL field. + string staging_target_path = 3; + + // The path to which the volume will be published. It MUST be an + // absolute path in the root filesystem of the process serving this + // request. The CO SHALL ensure uniqueness of target_path per volume. + // The CO SHALL ensure that the path exists, and that the process + // serving the request has `read` and `write` permissions to the path. + // This is a REQUIRED field. + string target_path = 4; + + // The capability of the volume the CO expects the volume to have. + // This is a REQUIRED field. + VolumeCapability volume_capability = 5; + + // Whether to publish the volume in readonly mode. This field is + // REQUIRED. + bool readonly = 6; + + // Secrets required by plugin to complete node publish volume request. + // A secret is a string to string map where the key identifies the + // name of the secret (e.g. "username" or "password"), and the value + // contains the secret data (e.g. "bob" or "abc123"). + // Each key MUST consist of alphanumeric characters, '-', '_' or '.'. + // Each value MUST contain a valid string. An SP MAY choose to accept + // binary (non-string) data by using a binary-to-text encoding scheme, + // like base64. + // An SP SHALL advertise the requirements for required secret keys and + // values in documentation. 
+ // CO SHALL permit passing through the required secrets. + // A CO MAY pass the same secrets to all RPCs, therefore the keys for + // all unique secrets that an SP expects must be unique across all CSI + // operations. + // This information is sensitive and MUST be treated as such (not + // logged, etc.) by the CO. + // This field is OPTIONAL. + map node_publish_secrets = 7; + + // Attributes of the volume to publish. This field is OPTIONAL and + // MUST match the attributes of the Volume identified by + // `volume_id`. + map volume_attributes = 8; +} + +message NodePublishVolumeResponse { +} +message NodeUnpublishVolumeRequest { + // The ID of the volume. This field is REQUIRED. + string volume_id = 1; + + // The path at which the volume was published. It MUST be an absolute + // path in the root filesystem of the process serving this request. + // This is a REQUIRED field. + string target_path = 2; +} + +message NodeUnpublishVolumeResponse { +} +message NodeGetIdRequest { +} + +message NodeGetIdResponse { + // The ID of the node as understood by the SP which SHALL be used by + // CO in subsequent `ControllerPublishVolume`. + // This is a REQUIRED field. + string node_id = 1; +} +message NodeGetCapabilitiesRequest { +} + +message NodeGetCapabilitiesResponse { + // All the capabilities that the node service supports. This field + // is OPTIONAL. + repeated NodeServiceCapability capabilities = 1; +} + +// Specifies a capability of the node service. +message NodeServiceCapability { + message RPC { + enum Type { + UNKNOWN = 0; + STAGE_UNSTAGE_VOLUME = 1; + } + + Type type = 1; + } + + oneof type { + // RPC that the controller supports. + RPC rpc = 1; + } +} diff --git a/csi_proto/csi-v0.3.0.proto b/csi_proto/csi-v0.3.0.proto new file mode 100644 index 0000000..22cff40 --- /dev/null +++ b/csi_proto/csi-v0.3.0.proto @@ -0,0 +1,1093 @@ +// Code generated by make; DO NOT EDIT. 
+syntax = "proto3"; +package csi.v0; + +import "google/protobuf/wrappers.proto"; + +option go_package = "csi"; +service Identity { + rpc GetPluginInfo(GetPluginInfoRequest) + returns (GetPluginInfoResponse) {} + + rpc GetPluginCapabilities(GetPluginCapabilitiesRequest) + returns (GetPluginCapabilitiesResponse) {} + + rpc Probe (ProbeRequest) + returns (ProbeResponse) {} +} + +service Controller { + rpc CreateVolume (CreateVolumeRequest) + returns (CreateVolumeResponse) {} + + rpc DeleteVolume (DeleteVolumeRequest) + returns (DeleteVolumeResponse) {} + + rpc ControllerPublishVolume (ControllerPublishVolumeRequest) + returns (ControllerPublishVolumeResponse) {} + + rpc ControllerUnpublishVolume (ControllerUnpublishVolumeRequest) + returns (ControllerUnpublishVolumeResponse) {} + + rpc ValidateVolumeCapabilities (ValidateVolumeCapabilitiesRequest) + returns (ValidateVolumeCapabilitiesResponse) {} + + rpc ListVolumes (ListVolumesRequest) + returns (ListVolumesResponse) {} + + rpc GetCapacity (GetCapacityRequest) + returns (GetCapacityResponse) {} + + rpc ControllerGetCapabilities (ControllerGetCapabilitiesRequest) + returns (ControllerGetCapabilitiesResponse) {} + + rpc CreateSnapshot (CreateSnapshotRequest) + returns (CreateSnapshotResponse) {} + + rpc DeleteSnapshot (DeleteSnapshotRequest) + returns (DeleteSnapshotResponse) {} + + rpc ListSnapshots (ListSnapshotsRequest) + returns (ListSnapshotsResponse) {} +} + +service Node { + rpc NodeStageVolume (NodeStageVolumeRequest) + returns (NodeStageVolumeResponse) {} + + rpc NodeUnstageVolume (NodeUnstageVolumeRequest) + returns (NodeUnstageVolumeResponse) {} + + rpc NodePublishVolume (NodePublishVolumeRequest) + returns (NodePublishVolumeResponse) {} + + rpc NodeUnpublishVolume (NodeUnpublishVolumeRequest) + returns (NodeUnpublishVolumeResponse) {} + + // NodeGetId is being deprecated in favor of NodeGetInfo and will be + // removed in CSI 1.0. 
Existing drivers, however, may depend on this + // RPC call and hence this RPC call MUST be implemented by the CSI + // plugin prior to v1.0. + rpc NodeGetId (NodeGetIdRequest) + returns (NodeGetIdResponse) { + option deprecated = true; + } + + rpc NodeGetCapabilities (NodeGetCapabilitiesRequest) + returns (NodeGetCapabilitiesResponse) {} + + // Prior to CSI 1.0 - CSI plugins MUST implement both NodeGetId and + // NodeGetInfo RPC calls. + rpc NodeGetInfo (NodeGetInfoRequest) + returns (NodeGetInfoResponse) {} +} +message GetPluginInfoRequest { + // Intentionally empty. +} + +message GetPluginInfoResponse { + // The name MUST follow reverse domain name notation format + // (https://en.wikipedia.org/wiki/Reverse_domain_name_notation). + // It SHOULD include the plugin's host company name and the plugin + // name, to minimize the possibility of collisions. It MUST be 63 + // characters or less, beginning and ending with an alphanumeric + // character ([a-z0-9A-Z]) with dashes (-), underscores (_), + // dots (.), and alphanumerics between. This field is REQUIRED. + string name = 1; + + // This field is REQUIRED. Value of this field is opaque to the CO. + string vendor_version = 2; + + // This field is OPTIONAL. Values are opaque to the CO. + map manifest = 3; +} +message GetPluginCapabilitiesRequest { + // Intentionally empty. +} + +message GetPluginCapabilitiesResponse { + // All the capabilities that the controller service supports. This + // field is OPTIONAL. + repeated PluginCapability capabilities = 2; +} + +// Specifies a capability of the plugin. +message PluginCapability { + message Service { + enum Type { + UNKNOWN = 0; + + // CONTROLLER_SERVICE indicates that the Plugin provides RPCs for + // the ControllerService. Plugins SHOULD provide this capability. + // In rare cases certain plugins may wish to omit the + // ControllerService entirely from their implementation, but such + // SHOULD NOT be the common case. 
+ // The presence of this capability determines whether the CO will + // attempt to invoke the REQUIRED ControllerService RPCs, as well + // as specific RPCs as indicated by ControllerGetCapabilities. + CONTROLLER_SERVICE = 1; + + // ACCESSIBILITY_CONSTRAINTS indicates that the volumes for this + // plugin may not be equally accessible by all nodes in the + // cluster. The CO MUST use the topology information returned by + // CreateVolumeRequest along with the topology information + // returned by NodeGetInfo to ensure that a given volume is + // accessible from a given node when scheduling workloads. + ACCESSIBILITY_CONSTRAINTS = 2; + } + Type type = 1; + } + + oneof type { + // Service that the plugin supports. + Service service = 1; + } +} +message ProbeRequest { + // Intentionally empty. +} + +message ProbeResponse { + // Readiness allows a plugin to report its initialization status back + // to the CO. Initialization for some plugins MAY be time consuming + // and it is important for a CO to distinguish between the following + // cases: + // + // 1) The plugin is in an unhealthy state and MAY need restarting. In + // this case a gRPC error code SHALL be returned. + // 2) The plugin is still initializing, but is otherwise perfectly + // healthy. In this case a successful response SHALL be returned + // with a readiness value of `false`. Calls to the plugin's + // Controller and/or Node services MAY fail due to an incomplete + // initialization state. + // 3) The plugin has finished initializing and is ready to service + // calls to its Controller and/or Node services. A successful + // response is returned with a readiness value of `true`. + // + // This field is OPTIONAL. If not present, the caller SHALL assume + // that the plugin is in a ready state and is accepting calls to its + // Controller and/or Node services (according to the plugin's reported + // capabilities). 
+ .google.protobuf.BoolValue ready = 1; +} +message CreateVolumeRequest { + // The suggested name for the storage space. This field is REQUIRED. + // It serves two purposes: + // 1) Idempotency - This name is generated by the CO to achieve + // idempotency. If `CreateVolume` fails, the volume may or may not + // be provisioned. In this case, the CO may call `CreateVolume` + // again, with the same name, to ensure the volume exists. The + // Plugin should ensure that multiple `CreateVolume` calls for the + // same name do not result in more than one piece of storage + // provisioned corresponding to that name. If a Plugin is unable to + // enforce idempotency, the CO's error recovery logic could result + // in multiple (unused) volumes being provisioned. + // 2) Suggested name - Some storage systems allow callers to specify + // an identifier by which to refer to the newly provisioned + // storage. If a storage system supports this, it can optionally + // use this name as the identifier for the new volume. + string name = 1; + + // This field is OPTIONAL. This allows the CO to specify the capacity + // requirement of the volume to be provisioned. If not specified, the + // Plugin MAY choose an implementation-defined capacity range. If + // specified it MUST always be honored, even when creating volumes + // from a source; which may force some backends to internally extend + // the volume after creating it. + + CapacityRange capacity_range = 2; + + // The capabilities that the provisioned volume MUST have: the Plugin + // MUST provision a volume that could satisfy ALL of the + // capabilities specified in this list. The Plugin MUST assume that + // the CO MAY use the provisioned volume later with ANY of the + // capabilities specified in this list. This also enables the CO to do + // early validation: if ANY of the specified volume capabilities are + // not supported by the Plugin, the call SHALL fail. This field is + // REQUIRED. 
+ repeated VolumeCapability volume_capabilities = 3; + + // Plugin specific parameters passed in as opaque key-value pairs. + // This field is OPTIONAL. The Plugin is responsible for parsing and + // validating these parameters. COs will treat these as opaque. + map parameters = 4; + + // Secrets required by plugin to complete volume creation request. + // This field is OPTIONAL. Refer to the `Secrets Requirements` + // section on how to use this field. + map controller_create_secrets = 5; + + // If specified, the new volume will be pre-populated with data from + // this source. This field is OPTIONAL. + VolumeContentSource volume_content_source = 6; + + // Specifies where (regions, zones, racks, etc.) the provisioned + // volume MUST be accessible from. + // An SP SHALL advertise the requirements for topological + // accessibility information in documentation. COs SHALL only specify + // topological accessibility information supported by the SP. + // This field is OPTIONAL. + // This field SHALL NOT be specified unless the SP has the + // ACCESSIBILITY_CONSTRAINTS plugin capability. + // If this field is not specified and the SP has the + // ACCESSIBILITY_CONSTRAINTS plugin capability, the SP MAY choose + // where the provisioned volume is accessible from. + TopologyRequirement accessibility_requirements = 7; +} + +// Specifies what source the volume will be created from. One of the +// type fields MUST be specified. +message VolumeContentSource { + message SnapshotSource { + // Contains identity information for the existing source snapshot. + // This field is REQUIRED. Plugin is REQUIRED to support creating + // volume from snapshot if it supports the capability + // CREATE_DELETE_SNAPSHOT. 
+ string id = 1; + } + + oneof type { + SnapshotSource snapshot = 1; + } +} + +message CreateVolumeResponse { + // Contains all attributes of the newly created volume that are + // relevant to the CO along with information required by the Plugin + // to uniquely identify the volume. This field is REQUIRED. + Volume volume = 1; +} + +// Specify a capability of a volume. +message VolumeCapability { + // Indicate that the volume will be accessed via the block device API. + message BlockVolume { + // Intentionally empty, for now. + } + + // Indicate that the volume will be accessed via the filesystem API. + message MountVolume { + // The filesystem type. This field is OPTIONAL. + // An empty string is equal to an unspecified field value. + string fs_type = 1; + + // The mount options that can be used for the volume. This field is + // OPTIONAL. `mount_flags` MAY contain sensitive information. + // Therefore, the CO and the Plugin MUST NOT leak this information + // to untrusted entities. The total size of this repeated field + // SHALL NOT exceed 4 KiB. + repeated string mount_flags = 2; + } + + // Specify how a volume can be accessed. + message AccessMode { + enum Mode { + UNKNOWN = 0; + + // Can only be published once as read/write on a single node, at + // any given time. + SINGLE_NODE_WRITER = 1; + + // Can only be published once as readonly on a single node, at + // any given time. + SINGLE_NODE_READER_ONLY = 2; + + // Can be published as readonly at multiple nodes simultaneously. + MULTI_NODE_READER_ONLY = 3; + + // Can be published at multiple nodes simultaneously. Only one of + // the node can be used as read/write. The rest will be readonly. + MULTI_NODE_SINGLE_WRITER = 4; + + // Can be published as read/write at multiple nodes + // simultaneously. + MULTI_NODE_MULTI_WRITER = 5; + } + + // This field is REQUIRED. + Mode mode = 1; + } + + // Specifies what API the volume will be accessed using. One of the + // following fields MUST be specified. 
+ oneof access_type { + BlockVolume block = 1; + MountVolume mount = 2; + } + + // This is a REQUIRED field. + AccessMode access_mode = 3; +} + +// The capacity of the storage space in bytes. To specify an exact size, +// `required_bytes` and `limit_bytes` SHALL be set to the same value. At +// least one of the these fields MUST be specified. +message CapacityRange { + // Volume MUST be at least this big. This field is OPTIONAL. + // A value of 0 is equal to an unspecified field value. + // The value of this field MUST NOT be negative. + int64 required_bytes = 1; + + // Volume MUST not be bigger than this. This field is OPTIONAL. + // A value of 0 is equal to an unspecified field value. + // The value of this field MUST NOT be negative. + int64 limit_bytes = 2; +} + +// The information about a provisioned volume. +message Volume { + // The capacity of the volume in bytes. This field is OPTIONAL. If not + // set (value of 0), it indicates that the capacity of the volume is + // unknown (e.g., NFS share). + // The value of this field MUST NOT be negative. + int64 capacity_bytes = 1; + + // Contains identity information for the created volume. This field is + // REQUIRED. The identity information will be used by the CO in + // subsequent calls to refer to the provisioned volume. + string id = 2; + + // Attributes reflect static properties of a volume and MUST be passed + // to volume validation and publishing calls. + // Attributes SHALL be opaque to a CO. Attributes SHALL NOT be mutable + // and SHALL be safe for the CO to cache. Attributes SHOULD NOT + // contain sensitive information. Attributes MAY NOT uniquely identify + // a volume. A volume uniquely identified by `id` SHALL always report + // the same attributes. This field is OPTIONAL and when present MUST + // be passed to volume validation and publishing calls. + map attributes = 3; + + // If specified, indicates that the volume is not empty and is + // pre-populated with data from the specified source. 
+ // This field is OPTIONAL. + VolumeContentSource content_source = 4; + + // Specifies where (regions, zones, racks, etc.) the provisioned + // volume is accessible from. + // A plugin that returns this field MUST also set the + // ACCESSIBILITY_CONSTRAINTS plugin capability. + // An SP MAY specify multiple topologies to indicate the volume is + // accessible from multiple locations. + // COs MAY use this information along with the topology information + // returned by NodeGetInfo to ensure that a given volume is accessible + // from a given node when scheduling workloads. + // This field is OPTIONAL. If it is not specified, the CO MAY assume + // the volume is equally accessible from all nodes in the cluster and + // may schedule workloads referencing the volume on any available + // node. + // + // Example 1: + // accessible_topology = {"region": "R1", "zone": "Z2"} + // Indicates a volume accessible only from the "region" "R1" and the + // "zone" "Z2". + // + // Example 2: + // accessible_topology = + // {"region": "R1", "zone": "Z2"}, + // {"region": "R1", "zone": "Z3"} + // Indicates a volume accessible from both "zone" "Z2" and "zone" "Z3" + // in the "region" "R1". + repeated Topology accessible_topology = 5; +} + +message TopologyRequirement { + // Specifies the list of topologies the provisioned volume MUST be + // accessible from. + // This field is OPTIONAL. If TopologyRequirement is specified either + // requisite or preferred or both MUST be specified. + // + // If requisite is specified, the provisioned volume MUST be + // accessible from at least one of the requisite topologies. + // + // Given + // x = number of topologies provisioned volume is accessible from + // n = number of requisite topologies + // The CO MUST ensure n >= 1. The SP MUST ensure x >= 1 + // If x==n, than the SP MUST make the provisioned volume available to + // all topologies from the list of requisite topologies. 
If it is + // unable to do so, the SP MUST fail the CreateVolume call. + // For example, if a volume should be accessible from a single zone, + // and requisite = + // {"region": "R1", "zone": "Z2"} + // then the provisioned volume MUST be accessible from the "region" + // "R1" and the "zone" "Z2". + // Similarly, if a volume should be accessible from two zones, and + // requisite = + // {"region": "R1", "zone": "Z2"}, + // {"region": "R1", "zone": "Z3"} + // then the provisioned volume MUST be accessible from the "region" + // "R1" and both "zone" "Z2" and "zone" "Z3". + // + // If x<n, than the SP SHALL choose x unique topologies from the list + // of requisite topologies. If it is unable to do so, the SP MUST fail + // the CreateVolume call. + // For example, if a volume should be accessible from a single zone, + // and requisite = + // {"region": "R1", "zone": "Z2"}, + // {"region": "R1", "zone": "Z3"} + // then the SP may choose to make the provisioned volume available in + // either the "zone" "Z2" or the "zone" "Z3" in the "region" "R1". + // Similarly, if a volume should be accessible from two zones, and + // requisite = + // {"region": "R1", "zone": "Z2"}, + // {"region": "R1", "zone": "Z3"}, + // {"region": "R1", "zone": "Z4"} + // then the provisioned volume MUST be accessible from any combination + // of two unique topologies: e.g. "R1/Z2" and "R1/Z3", or "R1/Z2" and + // "R1/Z4", or "R1/Z3" and "R1/Z4". + // + // If x>n, than the SP MUST make the provisioned volume available from + // all topologies from the list of requisite topologies and MAY choose + // the remaining x-n unique topologies from the list of all possible + // topologies. If it is unable to do so, the SP MUST fail the + // CreateVolume call. + // For example, if a volume should be accessible from two zones, and + // requisite = + // {"region": "R1", "zone": "Z2"} + // then the provisioned volume MUST be accessible from the "region" + // "R1" and the "zone" "Z2" and the SP may select the second zone + // independently, e.g. "R1/Z4". + repeated Topology requisite = 1; + + // Specifies the list of topologies the CO would prefer the volume to + // be provisioned in. + // + // This field is OPTIONAL. If TopologyRequirement is specified either + // requisite or preferred or both MUST be specified. + // + // An SP MUST attempt to make the provisioned volume available using + // the preferred topologies in order from first to last. + // + // If requisite is specified, all topologies in preferred list MUST + // also be present in the list of requisite topologies. + // + // If the SP is unable to to make the provisioned volume available + // from any of the preferred topologies, the SP MAY choose a topology + // from the list of requisite topologies. 
+ // If the list of requisite topologies is not specified, then the SP + // MAY choose from the list of all possible topologies. + // If the list of requisite topologies is specified and the SP is + // unable to to make the provisioned volume available from any of the + // requisite topologies it MUST fail the CreateVolume call. + // + // Example 1: + // Given a volume should be accessible from a single zone, and + // requisite = + // {"region": "R1", "zone": "Z2"}, + // {"region": "R1", "zone": "Z3"} + // preferred = + // {"region": "R1", "zone": "Z3"} + // then the the SP SHOULD first attempt to make the provisioned volume + // available from "zone" "Z3" in the "region" "R1" and fall back to + // "zone" "Z2" in the "region" "R1" if that is not possible. + // + // Example 2: + // Given a volume should be accessible from a single zone, and + // requisite = + // {"region": "R1", "zone": "Z2"}, + // {"region": "R1", "zone": "Z3"}, + // {"region": "R1", "zone": "Z4"}, + // {"region": "R1", "zone": "Z5"} + // preferred = + // {"region": "R1", "zone": "Z4"}, + // {"region": "R1", "zone": "Z2"} + // then the the SP SHOULD first attempt to make the provisioned volume + // accessible from "zone" "Z4" in the "region" "R1" and fall back to + // "zone" "Z2" in the "region" "R1" if that is not possible. If that + // is not possible, the SP may choose between either the "zone" + // "Z3" or "Z5" in the "region" "R1". 
+ // + // Example 3: + // Given a volume should be accessible from TWO zones (because an + // opaque parameter in CreateVolumeRequest, for example, specifies + // the volume is accessible from two zones, aka synchronously + // replicated), and + // requisite = + // {"region": "R1", "zone": "Z2"}, + // {"region": "R1", "zone": "Z3"}, + // {"region": "R1", "zone": "Z4"}, + // {"region": "R1", "zone": "Z5"} + // preferred = + // {"region": "R1", "zone": "Z5"}, + // {"region": "R1", "zone": "Z3"} + // then the the SP SHOULD first attempt to make the provisioned volume + // accessible from the combination of the two "zones" "Z5" and "Z3" in + // the "region" "R1". If that's not possible, it should fall back to + // a combination of "Z5" and other possibilities from the list of + // requisite. If that's not possible, it should fall back to a + // combination of "Z3" and other possibilities from the list of + // requisite. If that's not possible, it should fall back to a + // combination of other possibilities from the list of requisite. + repeated Topology preferred = 2; +} + +// Topology is a map of topological domains to topological segments. +// A topological domain is a sub-division of a cluster, like "region", +// "zone", "rack", etc. +// A topological segment is a specific instance of a topological domain, +// like "zone3", "rack3", etc. +// For example {"com.company/zone": "Z1", "com.company/rack": "R3"} +// Valid keys have two segments: an optional prefix and name, separated +// by a slash (/), for example: "com.company.example/zone". +// The key name segment is required. The prefix is optional. +// Both the key name and the prefix MUST each be 63 characters or less, +// begin and end with an alphanumeric character ([a-z0-9A-Z]) and +// contain only dashes (-), underscores (_), dots (.), or alphanumerics +// in between, for example "zone". 
+// The key prefix MUST follow reverse domain name notation format +// (https://en.wikipedia.org/wiki/Reverse_domain_name_notation). +// The key prefix SHOULD include the plugin's host company name and/or +// the plugin name, to minimize the possibility of collisions with keys +// from other plugins. +// If a key prefix is specified, it MUST be identical across all +// topology keys returned by the SP (across all RPCs). +// Keys MUST be case-insensitive. Meaning the keys "Zone" and "zone" +// MUST not both exist. +// Each value (topological segment) MUST contain 1 or more strings. +// Each string MUST be 63 characters or less and begin and end with an +// alphanumeric character with '-', '_', '.', or alphanumerics in +// between. +message Topology { + map<string, string> segments = 1; +} +message DeleteVolumeRequest { + // The ID of the volume to be deprovisioned. + // This field is REQUIRED. + string volume_id = 1; + + // Secrets required by plugin to complete volume deletion request. + // This field is OPTIONAL. Refer to the `Secrets Requirements` + // section on how to use this field. + map<string, string> controller_delete_secrets = 2; +} + +message DeleteVolumeResponse { + // Intentionally empty. +} +message ControllerPublishVolumeRequest { + // The ID of the volume to be used on a node. + // This field is REQUIRED. + string volume_id = 1; + + // The ID of the node. This field is REQUIRED. The CO SHALL set this + // field to match the node ID returned by `NodeGetInfo`. + string node_id = 2; + + // The capability of the volume the CO expects the volume to have. + // This is a REQUIRED field. + VolumeCapability volume_capability = 3; + + // Whether to publish the volume in readonly mode. This field is + // REQUIRED. + bool readonly = 4; + + // Secrets required by plugin to complete controller publish volume + // request. This field is OPTIONAL. Refer to the + // `Secrets Requirements` section on how to use this field. 
+ map controller_publish_secrets = 5; + + // Attributes of the volume to be used on a node. This field is + // OPTIONAL and MUST match the attributes of the Volume identified + // by `volume_id`. + map volume_attributes = 6; +} + +message ControllerPublishVolumeResponse { + // The SP specific information that will be passed to the Plugin in + // the subsequent `NodeStageVolume` or `NodePublishVolume` calls + // for the given volume. + // This information is opaque to the CO. This field is OPTIONAL. + map publish_info = 1; +} +message ControllerUnpublishVolumeRequest { + // The ID of the volume. This field is REQUIRED. + string volume_id = 1; + + // The ID of the node. This field is OPTIONAL. The CO SHOULD set this + // field to match the node ID returned by `NodeGetInfo` or leave it + // unset. If the value is set, the SP MUST unpublish the volume from + // the specified node. If the value is unset, the SP MUST unpublish + // the volume from all nodes it is published to. + string node_id = 2; + + // Secrets required by plugin to complete controller unpublish volume + // request. This SHOULD be the same secrets passed to the + // ControllerPublishVolume call for the specified volume. + // This field is OPTIONAL. Refer to the `Secrets Requirements` + // section on how to use this field. + map controller_unpublish_secrets = 3; +} + +message ControllerUnpublishVolumeResponse { + // Intentionally empty. +} +message ValidateVolumeCapabilitiesRequest { + // The ID of the volume to check. This field is REQUIRED. + string volume_id = 1; + + // The capabilities that the CO wants to check for the volume. This + // call SHALL return "supported" only if all the volume capabilities + // specified below are supported. This field is REQUIRED. + repeated VolumeCapability volume_capabilities = 2; + + // Attributes of the volume to check. This field is OPTIONAL and MUST + // match the attributes of the Volume identified by `volume_id`. 
+ map volume_attributes = 3; + + // Specifies where (regions, zones, racks, etc.) the caller believes + // the volume is accessible from. + // A caller MAY specify multiple topologies to indicate they believe + // the volume to be accessible from multiple locations. + // This field is OPTIONAL. This field SHALL NOT be set unless the + // plugin advertises the ACCESSIBILITY_CONSTRAINTS capability. + repeated Topology accessible_topology = 4; +} + +message ValidateVolumeCapabilitiesResponse { + // True if the Plugin supports the specified capabilities for the + // given volume. This field is REQUIRED. + bool supported = 1; + + // Message to the CO if `supported` above is false. This field is + // OPTIONAL. + // An empty string is equal to an unspecified field value. + string message = 2; +} +message ListVolumesRequest { + // If specified (non-zero value), the Plugin MUST NOT return more + // entries than this number in the response. If the actual number of + // entries is more than this number, the Plugin MUST set `next_token` + // in the response which can be used to get the next page of entries + // in the subsequent `ListVolumes` call. This field is OPTIONAL. If + // not specified (zero value), it means there is no restriction on the + // number of entries that can be returned. + // The value of this field MUST NOT be negative. + int32 max_entries = 1; + + // A token to specify where to start paginating. Set this field to + // `next_token` returned by a previous `ListVolumes` call to get the + // next page of entries. This field is OPTIONAL. + // An empty string is equal to an unspecified field value. + string starting_token = 2; +} + +message ListVolumesResponse { + message Entry { + Volume volume = 1; + } + + repeated Entry entries = 1; + + // This token allows you to get the next page of entries for + // `ListVolumes` request. 
If the number of entries is larger than + // `max_entries`, use the `next_token` as a value for the + // `starting_token` field in the next `ListVolumes` request. This + // field is OPTIONAL. + // An empty string is equal to an unspecified field value. + string next_token = 2; +} +message GetCapacityRequest { + // If specified, the Plugin SHALL report the capacity of the storage + // that can be used to provision volumes that satisfy ALL of the + // specified `volume_capabilities`. These are the same + // `volume_capabilities` the CO will use in `CreateVolumeRequest`. + // This field is OPTIONAL. + repeated VolumeCapability volume_capabilities = 1; + + // If specified, the Plugin SHALL report the capacity of the storage + // that can be used to provision volumes with the given Plugin + // specific `parameters`. These are the same `parameters` the CO will + // use in `CreateVolumeRequest`. This field is OPTIONAL. + map<string, string> parameters = 2; + + // If specified, the Plugin SHALL report the capacity of the storage + // that can be used to provision volumes that in the specified + // `accessible_topology`. This is the same as the + // `accessible_topology` the CO returns in a `CreateVolumeResponse`. + // This field is OPTIONAL. This field SHALL NOT be set unless the + // plugin advertises the ACCESSIBILITY_CONSTRAINTS capability. + Topology accessible_topology = 3; +} + +message GetCapacityResponse { + // The available capacity, in bytes, of the storage that can be used + // to provision volumes. If `volume_capabilities` or `parameters` is + // specified in the request, the Plugin SHALL take those into + // consideration when calculating the available capacity of the + // storage. This field is REQUIRED. + // The value of this field MUST NOT be negative. + int64 available_capacity = 1; +} +message ControllerGetCapabilitiesRequest { + // Intentionally empty. +} + +message ControllerGetCapabilitiesResponse { + // All the capabilities that the controller service supports. 
This
+ // field is OPTIONAL.
+ repeated ControllerServiceCapability capabilities = 2;
+}
+
+// Specifies a capability of the controller service.
+message ControllerServiceCapability {
+ message RPC {
+ enum Type {
+ UNKNOWN = 0;
+ CREATE_DELETE_VOLUME = 1;
+ PUBLISH_UNPUBLISH_VOLUME = 2;
+ LIST_VOLUMES = 3;
+ GET_CAPACITY = 4;
+ // Currently the only way to consume a snapshot is to create
+ // a volume from it. Therefore plugins supporting
+ // CREATE_DELETE_SNAPSHOT MUST support creating volume from
+ // snapshot.
+ CREATE_DELETE_SNAPSHOT = 5;
+ // LIST_SNAPSHOTS is NOT REQUIRED. For plugins that need to upload
+ // a snapshot after it is being cut, LIST_SNAPSHOTS COULD be used
+ // with the snapshot_id as the filter to query whether the
+ // uploading process is complete or not.
+ LIST_SNAPSHOTS = 6;
+ }
+
+ Type type = 1;
+ }
+
+ oneof type {
+ // RPC that the controller supports.
+ RPC rpc = 1;
+ }
+}
+message CreateSnapshotRequest {
+ // The ID of the source volume to be snapshotted.
+ // This field is REQUIRED.
+ string source_volume_id = 1;
+
+ // The suggested name for the snapshot. This field is REQUIRED for
+ // idempotency.
+ string name = 2;
+
+ // Secrets required by plugin to complete snapshot creation request.
+ // This field is OPTIONAL. Refer to the `Secrets Requirements`
+ // section on how to use this field.
+ map<string, string> create_snapshot_secrets = 3;
+
+ // Plugin specific parameters passed in as opaque key-value pairs.
+ // This field is OPTIONAL. The Plugin is responsible for parsing and
+ // validating these parameters. COs will treat these as opaque.
+ // Use cases for opaque parameters:
+ // - Specify a policy to automatically clean up the snapshot.
+ // - Specify an expiration date for the snapshot.
+ // - Specify whether the snapshot is readonly or read/write.
+ // - Specify if the snapshot should be replicated to some place.
+ // - Specify primary or secondary for replication systems that
+ // support snapshotting only on primary. 
+ map<string, string> parameters = 4;
+}
+
+message CreateSnapshotResponse {
+ // Contains all attributes of the newly created snapshot that are
+ // relevant to the CO along with information required by the Plugin
+ // to uniquely identify the snapshot. This field is REQUIRED.
+ Snapshot snapshot = 1;
+}
+
+// The information about a provisioned snapshot.
+message Snapshot {
+ // This is the complete size of the snapshot in bytes. The purpose of
+ // this field is to give CO guidance on how much space is needed to
+ // create a volume from this snapshot. The size of the volume MUST NOT
+ // be less than the size of the source snapshot. This field is
+ // OPTIONAL. If this field is not set, it indicates that this size is
+ // unknown. The value of this field MUST NOT be negative and a size of
+ // zero means it is unspecified.
+ int64 size_bytes = 1;
+
+ // Uniquely identifies a snapshot and is generated by the plugin. It
+ // will not change over time. This field is REQUIRED. The identity
+ // information will be used by the CO in subsequent calls to refer to
+ // the provisioned snapshot.
+ string id = 2;
+
+ // Identity information for the source volume. Note that creating a
+ // snapshot from a snapshot is not supported here so the source has to
+ // be a volume. This field is REQUIRED.
+ string source_volume_id = 3;
+
+ // Timestamp when the point-in-time snapshot is taken on the storage
+ // system. The format of this field should be a Unix nanoseconds time
+ // encoded as an int64. On Unix, the command `date +%s%N` returns the
+ // current time in nanoseconds since 1970-01-01 00:00:00 UTC. This
+ // field is REQUIRED.
+ int64 created_at = 4;
+
+ // The status of a snapshot.
+ SnapshotStatus status = 5;
+}
+
+// The status of a snapshot.
+message SnapshotStatus {
+ enum Type {
+ UNKNOWN = 0;
+ // A snapshot is ready for use.
+ READY = 1;
+ // A snapshot is cut and is now being uploaded. 
+ // Some cloud providers and storage systems uploads the snapshot
+ // to the cloud after the snapshot is cut. During this phase,
+ // `thaw` can be done so the application can be running again if
+ // `freeze` was done before taking the snapshot.
+ UPLOADING = 2;
+ // An error occurred during the snapshot uploading process.
+ // This error status is specific for uploading because
+ // `CreateSnapshot` is a blocking call before the snapshot is
+ // cut and therefore it SHOULD NOT come back with an error
+ // status when an error occurs. Instead a gRPC error code SHALL
+ // be returned by `CreateSnapshot` when an error occurs before
+ // a snapshot is cut.
+ ERROR_UPLOADING = 3;
+ }
+ // This field is REQUIRED.
+ Type type = 1;
+
+ // Additional information to describe why a snapshot ended up in the
+ // `ERROR_UPLOADING` status. This field is OPTIONAL.
+ string details = 2;
+}
+message DeleteSnapshotRequest {
+ // The ID of the snapshot to be deleted.
+ // This field is REQUIRED.
+ string snapshot_id = 1;
+
+ // Secrets required by plugin to complete snapshot deletion request.
+ // This field is OPTIONAL. Refer to the `Secrets Requirements`
+ // section on how to use this field.
+ map<string, string> delete_snapshot_secrets = 2;
+}
+
+message DeleteSnapshotResponse {}
+// List all snapshots on the storage system regardless of how they were
+// created.
+message ListSnapshotsRequest {
+ // If specified (non-zero value), the Plugin MUST NOT return more
+ // entries than this number in the response. If the actual number of
+ // entries is more than this number, the Plugin MUST set `next_token`
+ // in the response which can be used to get the next page of entries
+ // in the subsequent `ListSnapshots` call. This field is OPTIONAL. If
+ // not specified (zero value), it means there is no restriction on the
+ // number of entries that can be returned.
+ // The value of this field MUST NOT be negative.
+ int32 max_entries = 1;
+
+ // A token to specify where to start paginating. 
Set this field to
+ // `next_token` returned by a previous `ListSnapshots` call to get the
+ // next page of entries. This field is OPTIONAL.
+ // An empty string is equal to an unspecified field value.
+ string starting_token = 2;
+
+ // Identity information for the source volume. This field is OPTIONAL.
+ // It can be used to list snapshots by volume.
+ string source_volume_id = 3;
+
+ // Identity information for a specific snapshot. This field is
+ // OPTIONAL. It can be used to list only a specific snapshot.
+ // ListSnapshots will return with current snapshot information
+ // and will not block if the snapshot is being uploaded.
+ string snapshot_id = 4;
+}
+
+message ListSnapshotsResponse {
+ message Entry {
+ Snapshot snapshot = 1;
+ }
+
+ repeated Entry entries = 1;
+
+ // This token allows you to get the next page of entries for
+ // `ListSnapshots` request. If the number of entries is larger than
+ // `max_entries`, use the `next_token` as a value for the
+ // `starting_token` field in the next `ListSnapshots` request. This
+ // field is OPTIONAL.
+ // An empty string is equal to an unspecified field value.
+ string next_token = 2;
+}
+message NodeStageVolumeRequest {
+ // The ID of the volume to publish. This field is REQUIRED.
+ string volume_id = 1;
+
+ // The CO SHALL set this field to the value returned by
+ // `ControllerPublishVolume` if the corresponding Controller Plugin
+ // has `PUBLISH_UNPUBLISH_VOLUME` controller capability, and SHALL be
+ // left unset if the corresponding Controller Plugin does not have
+ // this capability. This is an OPTIONAL field.
+ map<string, string> publish_info = 2;
+
+ // The path to which the volume will be published. It MUST be an
+ // absolute path in the root filesystem of the process serving this
+ // request. The CO SHALL ensure that there is only one
+ // staging_target_path per volume.
+ // This is a REQUIRED field.
+ string staging_target_path = 3;
+
+ // The capability of the volume the CO expects the volume to have. 
+ // This is a REQUIRED field.
+ VolumeCapability volume_capability = 4;
+
+ // Secrets required by plugin to complete node stage volume request.
+ // This field is OPTIONAL. Refer to the `Secrets Requirements`
+ // section on how to use this field.
+ map<string, string> node_stage_secrets = 5;
+
+ // Attributes of the volume to publish. This field is OPTIONAL and
+ // MUST match the attributes of the `Volume` identified by
+ // `volume_id`.
+ map<string, string> volume_attributes = 6;
+}
+
+message NodeStageVolumeResponse {
+ // Intentionally empty.
+}
+message NodeUnstageVolumeRequest {
+ // The ID of the volume. This field is REQUIRED.
+ string volume_id = 1;
+
+ // The path at which the volume was published. It MUST be an absolute
+ // path in the root filesystem of the process serving this request.
+ // This is a REQUIRED field.
+ string staging_target_path = 2;
+}
+
+message NodeUnstageVolumeResponse {
+ // Intentionally empty.
+}
+message NodePublishVolumeRequest {
+ // The ID of the volume to publish. This field is REQUIRED.
+ string volume_id = 1;
+
+ // The CO SHALL set this field to the value returned by
+ // `ControllerPublishVolume` if the corresponding Controller Plugin
+ // has `PUBLISH_UNPUBLISH_VOLUME` controller capability, and SHALL be
+ // left unset if the corresponding Controller Plugin does not have
+ // this capability. This is an OPTIONAL field.
+ map<string, string> publish_info = 2;
+
+ // The path to which the device was mounted by `NodeStageVolume`.
+ // It MUST be an absolute path in the root filesystem of the process
+ // serving this request.
+ // It MUST be set if the Node Plugin implements the
+ // `STAGE_UNSTAGE_VOLUME` node capability.
+ // This is an OPTIONAL field.
+ string staging_target_path = 3;
+
+ // The path to which the volume will be published. It MUST be an
+ // absolute path in the root filesystem of the process serving this
+ // request. The CO SHALL ensure uniqueness of target_path per volume. 
+ // The CO SHALL ensure that the path exists, and that the process
+ // serving the request has `read` and `write` permissions to the path.
+ // This is a REQUIRED field.
+ string target_path = 4;
+
+ // The capability of the volume the CO expects the volume to have.
+ // This is a REQUIRED field.
+ VolumeCapability volume_capability = 5;
+
+ // Whether to publish the volume in readonly mode. This field is
+ // REQUIRED.
+ bool readonly = 6;
+
+ // Secrets required by plugin to complete node publish volume request.
+ // This field is OPTIONAL. Refer to the `Secrets Requirements`
+ // section on how to use this field.
+ map<string, string> node_publish_secrets = 7;
+
+ // Attributes of the volume to publish. This field is OPTIONAL and
+ // MUST match the attributes of the Volume identified by
+ // `volume_id`.
+ map<string, string> volume_attributes = 8;
+}
+
+message NodePublishVolumeResponse {
+ // Intentionally empty.
+}
+message NodeUnpublishVolumeRequest {
+ // The ID of the volume. This field is REQUIRED.
+ string volume_id = 1;
+
+ // The path at which the volume was published. It MUST be an absolute
+ // path in the root filesystem of the process serving this request.
+ // This is a REQUIRED field.
+ string target_path = 2;
+}
+
+message NodeUnpublishVolumeResponse {
+ // Intentionally empty.
+}
+message NodeGetIdRequest {
+ // Intentionally empty.
+}
+
+message NodeGetIdResponse {
+ // The ID of the node as understood by the SP which SHALL be used by
+ // CO in subsequent `ControllerPublishVolume`.
+ // This is a REQUIRED field.
+ string node_id = 1;
+}
+message NodeGetCapabilitiesRequest {
+ // Intentionally empty.
+}
+
+message NodeGetCapabilitiesResponse {
+ // All the capabilities that the node service supports. This field
+ // is OPTIONAL.
+ repeated NodeServiceCapability capabilities = 1;
+}
+
+// Specifies a capability of the node service. 
+message NodeServiceCapability {
+ message RPC {
+ enum Type {
+ UNKNOWN = 0;
+ STAGE_UNSTAGE_VOLUME = 1;
+ }
+
+ Type type = 1;
+ }
+
+ oneof type {
+ // RPC that the controller supports.
+ RPC rpc = 1;
+ }
+}
+message NodeGetInfoRequest {
+}
+
+message NodeGetInfoResponse {
+ // The ID of the node as understood by the SP which SHALL be used by
+ // CO in subsequent calls to `ControllerPublishVolume`.
+ // This is a REQUIRED field.
+ string node_id = 1;
+
+ // Maximum number of volumes that controller can publish to the node.
+ // If value is not set or zero CO SHALL decide how many volumes of
+ // this type can be published by the controller to the node. The
+ // plugin MUST NOT set negative values here.
+ // This field is OPTIONAL.
+ int64 max_volumes_per_node = 2;
+
+ // Specifies where (regions, zones, racks, etc.) the node is
+ // accessible from.
+ // A plugin that returns this field MUST also set the
+ // ACCESSIBILITY_CONSTRAINTS plugin capability.
+ // COs MAY use this information along with the topology information
+ // returned in CreateVolumeResponse to ensure that a given volume is
+ // accessible from a given node when scheduling workloads.
+ // This field is OPTIONAL. If it is not specified, the CO MAY assume
+ // the node is not subject to any topological constraint, and MAY
+ // schedule workloads that reference any volume V, such that there are
+ // no topological constraints declared for V.
+ //
+ // Example 1:
+ // accessible_topology =
+ // {"region": "R1", "zone": "Z2"}
+ // Indicates the node exists within the "region" "R1" and the "zone"
+ // "Z2".
+ Topology accessible_topology = 3;
+}
diff --git a/csi_proto/csi-v1.0.0.proto b/csi_proto/csi-v1.0.0.proto
new file mode 100644
index 0000000..ec5e379
--- /dev/null
+++ b/csi_proto/csi-v1.0.0.proto
@@ -0,0 +1,1203 @@
+// Code generated by make; DO NOT EDIT. 
+syntax = "proto3"; +package csi.v1; + +import "google/protobuf/descriptor.proto"; +import "google/protobuf/timestamp.proto"; +import "google/protobuf/wrappers.proto"; + +option go_package = "csi"; + +extend google.protobuf.FieldOptions { + // Indicates that a field MAY contain information that is sensitive + // and MUST be treated as such (e.g. not logged). + bool csi_secret = 1059; +} +service Identity { + rpc GetPluginInfo(GetPluginInfoRequest) + returns (GetPluginInfoResponse) {} + + rpc GetPluginCapabilities(GetPluginCapabilitiesRequest) + returns (GetPluginCapabilitiesResponse) {} + + rpc Probe (ProbeRequest) + returns (ProbeResponse) {} +} + +service Controller { + rpc CreateVolume (CreateVolumeRequest) + returns (CreateVolumeResponse) {} + + rpc DeleteVolume (DeleteVolumeRequest) + returns (DeleteVolumeResponse) {} + + rpc ControllerPublishVolume (ControllerPublishVolumeRequest) + returns (ControllerPublishVolumeResponse) {} + + rpc ControllerUnpublishVolume (ControllerUnpublishVolumeRequest) + returns (ControllerUnpublishVolumeResponse) {} + + rpc ValidateVolumeCapabilities (ValidateVolumeCapabilitiesRequest) + returns (ValidateVolumeCapabilitiesResponse) {} + + rpc ListVolumes (ListVolumesRequest) + returns (ListVolumesResponse) {} + + rpc GetCapacity (GetCapacityRequest) + returns (GetCapacityResponse) {} + + rpc ControllerGetCapabilities (ControllerGetCapabilitiesRequest) + returns (ControllerGetCapabilitiesResponse) {} + + rpc CreateSnapshot (CreateSnapshotRequest) + returns (CreateSnapshotResponse) {} + + rpc DeleteSnapshot (DeleteSnapshotRequest) + returns (DeleteSnapshotResponse) {} + + rpc ListSnapshots (ListSnapshotsRequest) + returns (ListSnapshotsResponse) {} +} + +service Node { + rpc NodeStageVolume (NodeStageVolumeRequest) + returns (NodeStageVolumeResponse) {} + + rpc NodeUnstageVolume (NodeUnstageVolumeRequest) + returns (NodeUnstageVolumeResponse) {} + + rpc NodePublishVolume (NodePublishVolumeRequest) + returns (NodePublishVolumeResponse) 
{}
+
+ rpc NodeUnpublishVolume (NodeUnpublishVolumeRequest)
+ returns (NodeUnpublishVolumeResponse) {}
+
+ rpc NodeGetVolumeStats (NodeGetVolumeStatsRequest)
+ returns (NodeGetVolumeStatsResponse) {}
+
+ rpc NodeGetCapabilities (NodeGetCapabilitiesRequest)
+ returns (NodeGetCapabilitiesResponse) {}
+
+ rpc NodeGetInfo (NodeGetInfoRequest)
+ returns (NodeGetInfoResponse) {}
+}
+message GetPluginInfoRequest {
+ // Intentionally empty.
+}
+
+message GetPluginInfoResponse {
+ // The name MUST follow domain name notation format
+ // (https://tools.ietf.org/html/rfc1035#section-2.3.1). It SHOULD
+ // include the plugin's host company name and the plugin name,
+ // to minimize the possibility of collisions. It MUST be 63
+ // characters or less, beginning and ending with an alphanumeric
+ // character ([a-z0-9A-Z]) with dashes (-), dots (.), and
+ // alphanumerics between. This field is REQUIRED.
+ string name = 1;
+
+ // This field is REQUIRED. Value of this field is opaque to the CO.
+ string vendor_version = 2;
+
+ // This field is OPTIONAL. Values are opaque to the CO.
+ map<string, string> manifest = 3;
+}
+message GetPluginCapabilitiesRequest {
+ // Intentionally empty.
+}
+
+message GetPluginCapabilitiesResponse {
+ // All the capabilities that the controller service supports. This
+ // field is OPTIONAL.
+ repeated PluginCapability capabilities = 1;
+}
+
+// Specifies a capability of the plugin.
+message PluginCapability {
+ message Service {
+ enum Type {
+ UNKNOWN = 0;
+
+ // CONTROLLER_SERVICE indicates that the Plugin provides RPCs for
+ // the ControllerService. Plugins SHOULD provide this capability.
+ // In rare cases certain plugins MAY wish to omit the
+ // ControllerService entirely from their implementation, but such
+ // SHOULD NOT be the common case.
+ // The presence of this capability determines whether the CO will
+ // attempt to invoke the REQUIRED ControllerService RPCs, as well
+ // as specific RPCs as indicated by ControllerGetCapabilities. 
+ CONTROLLER_SERVICE = 1; + + // VOLUME_ACCESSIBILITY_CONSTRAINTS indicates that the volumes for + // this plugin MAY NOT be equally accessible by all nodes in the + // cluster. The CO MUST use the topology information returned by + // CreateVolumeRequest along with the topology information + // returned by NodeGetInfo to ensure that a given volume is + // accessible from a given node when scheduling workloads. + VOLUME_ACCESSIBILITY_CONSTRAINTS = 2; + } + Type type = 1; + } + + oneof type { + // Service that the plugin supports. + Service service = 1; + } +} +message ProbeRequest { + // Intentionally empty. +} + +message ProbeResponse { + // Readiness allows a plugin to report its initialization status back + // to the CO. Initialization for some plugins MAY be time consuming + // and it is important for a CO to distinguish between the following + // cases: + // + // 1) The plugin is in an unhealthy state and MAY need restarting. In + // this case a gRPC error code SHALL be returned. + // 2) The plugin is still initializing, but is otherwise perfectly + // healthy. In this case a successful response SHALL be returned + // with a readiness value of `false`. Calls to the plugin's + // Controller and/or Node services MAY fail due to an incomplete + // initialization state. + // 3) The plugin has finished initializing and is ready to service + // calls to its Controller and/or Node services. A successful + // response is returned with a readiness value of `true`. + // + // This field is OPTIONAL. If not present, the caller SHALL assume + // that the plugin is in a ready state and is accepting calls to its + // Controller and/or Node services (according to the plugin's reported + // capabilities). + .google.protobuf.BoolValue ready = 1; +} +message CreateVolumeRequest { + // The suggested name for the storage space. This field is REQUIRED. + // It serves two purposes: + // 1) Idempotency - This name is generated by the CO to achieve + // idempotency. 
The Plugin SHOULD ensure that multiple + // `CreateVolume` calls for the same name do not result in more + // than one piece of storage provisioned corresponding to that + // name. If a Plugin is unable to enforce idempotency, the CO's + // error recovery logic could result in multiple (unused) volumes + // being provisioned. + // In the case of error, the CO MUST handle the gRPC error codes + // per the recovery behavior defined in the "CreateVolume Errors" + // section below. + // The CO is responsible for cleaning up volumes it provisioned + // that it no longer needs. If the CO is uncertain whether a volume + // was provisioned or not when a `CreateVolume` call fails, the CO + // MAY call `CreateVolume` again, with the same name, to ensure the + // volume exists and to retrieve the volume's `volume_id` (unless + // otherwise prohibited by "CreateVolume Errors"). + // 2) Suggested name - Some storage systems allow callers to specify + // an identifier by which to refer to the newly provisioned + // storage. If a storage system supports this, it can optionally + // use this name as the identifier for the new volume. + // Any Unicode string that conforms to the length limit is allowed + // except those containing the following banned characters: + // U+0000-U+0008, U+000B, U+000C, U+000E-U+001F, U+007F-U+009F. + // (These are control characters other than commonly used whitespace.) + string name = 1; + + // This field is OPTIONAL. This allows the CO to specify the capacity + // requirement of the volume to be provisioned. If not specified, the + // Plugin MAY choose an implementation-defined capacity range. If + // specified it MUST always be honored, even when creating volumes + // from a source; which MAY force some backends to internally extend + // the volume after creating it. + CapacityRange capacity_range = 2; + + // The capabilities that the provisioned volume MUST have. 
SP MUST
+ // provision a volume that will satisfy ALL of the capabilities
+ // specified in this list. Otherwise SP MUST return the appropriate
+ // gRPC error code.
+ // The Plugin MUST assume that the CO MAY use the provisioned volume
+ // with ANY of the capabilities specified in this list.
+ // For example, a CO MAY specify two volume capabilities: one with
+ // access mode SINGLE_NODE_WRITER and another with access mode
+ // MULTI_NODE_READER_ONLY. In this case, the SP MUST verify that the
+ // provisioned volume can be used in either mode.
+ // This also enables the CO to do early validation: If ANY of the
+ // specified volume capabilities are not supported by the SP, the call
+ // MUST return the appropriate gRPC error code.
+ // This field is REQUIRED.
+ repeated VolumeCapability volume_capabilities = 3;
+
+ // Plugin specific parameters passed in as opaque key-value pairs.
+ // This field is OPTIONAL. The Plugin is responsible for parsing and
+ // validating these parameters. COs will treat these as opaque.
+ map<string, string> parameters = 4;
+
+ // Secrets required by plugin to complete volume creation request.
+ // This field is OPTIONAL. Refer to the `Secrets Requirements`
+ // section on how to use this field.
+ map<string, string> secrets = 5 [(csi_secret) = true];
+
+ // If specified, the new volume will be pre-populated with data from
+ // this source. This field is OPTIONAL.
+ VolumeContentSource volume_content_source = 6;
+
+ // Specifies where (regions, zones, racks, etc.) the provisioned
+ // volume MUST be accessible from.
+ // An SP SHALL advertise the requirements for topological
+ // accessibility information in documentation. COs SHALL only specify
+ // topological accessibility information supported by the SP.
+ // This field is OPTIONAL.
+ // This field SHALL NOT be specified unless the SP has the
+ // VOLUME_ACCESSIBILITY_CONSTRAINTS plugin capability. 
+ // If this field is not specified and the SP has the + // VOLUME_ACCESSIBILITY_CONSTRAINTS plugin capability, the SP MAY + // choose where the provisioned volume is accessible from. + TopologyRequirement accessibility_requirements = 7; +} + +// Specifies what source the volume will be created from. One of the +// type fields MUST be specified. +message VolumeContentSource { + message SnapshotSource { + // Contains identity information for the existing source snapshot. + // This field is REQUIRED. Plugin is REQUIRED to support creating + // volume from snapshot if it supports the capability + // CREATE_DELETE_SNAPSHOT. + string snapshot_id = 1; + } + + message VolumeSource { + // Contains identity information for the existing source volume. + // This field is REQUIRED. Plugins reporting CLONE_VOLUME + // capability MUST support creating a volume from another volume. + string volume_id = 1; + } + + oneof type { + SnapshotSource snapshot = 1; + VolumeSource volume = 2; + } +} + +message CreateVolumeResponse { + // Contains all attributes of the newly created volume that are + // relevant to the CO along with information required by the Plugin + // to uniquely identify the volume. This field is REQUIRED. + Volume volume = 1; +} + +// Specify a capability of a volume. +message VolumeCapability { + // Indicate that the volume will be accessed via the block device API. + message BlockVolume { + // Intentionally empty, for now. + } + + // Indicate that the volume will be accessed via the filesystem API. + message MountVolume { + // The filesystem type. This field is OPTIONAL. + // An empty string is equal to an unspecified field value. + string fs_type = 1; + + // The mount options that can be used for the volume. This field is + // OPTIONAL. `mount_flags` MAY contain sensitive information. + // Therefore, the CO and the Plugin MUST NOT leak this information + // to untrusted entities. The total size of this repeated field + // SHALL NOT exceed 4 KiB. 
+ repeated string mount_flags = 2; + } + + // Specify how a volume can be accessed. + message AccessMode { + enum Mode { + UNKNOWN = 0; + + // Can only be published once as read/write on a single node, at + // any given time. + SINGLE_NODE_WRITER = 1; + + // Can only be published once as readonly on a single node, at + // any given time. + SINGLE_NODE_READER_ONLY = 2; + + // Can be published as readonly at multiple nodes simultaneously. + MULTI_NODE_READER_ONLY = 3; + + // Can be published at multiple nodes simultaneously. Only one of + // the node can be used as read/write. The rest will be readonly. + MULTI_NODE_SINGLE_WRITER = 4; + + // Can be published as read/write at multiple nodes + // simultaneously. + MULTI_NODE_MULTI_WRITER = 5; + } + + // This field is REQUIRED. + Mode mode = 1; + } + + // Specifies what API the volume will be accessed using. One of the + // following fields MUST be specified. + oneof access_type { + BlockVolume block = 1; + MountVolume mount = 2; + } + + // This is a REQUIRED field. + AccessMode access_mode = 3; +} + +// The capacity of the storage space in bytes. To specify an exact size, +// `required_bytes` and `limit_bytes` SHALL be set to the same value. At +// least one of the these fields MUST be specified. +message CapacityRange { + // Volume MUST be at least this big. This field is OPTIONAL. + // A value of 0 is equal to an unspecified field value. + // The value of this field MUST NOT be negative. + int64 required_bytes = 1; + + // Volume MUST not be bigger than this. This field is OPTIONAL. + // A value of 0 is equal to an unspecified field value. + // The value of this field MUST NOT be negative. + int64 limit_bytes = 2; +} + +// Information about a specific volume. +message Volume { + // The capacity of the volume in bytes. This field is OPTIONAL. If not + // set (value of 0), it indicates that the capacity of the volume is + // unknown (e.g., NFS share). + // The value of this field MUST NOT be negative. 
+ int64 capacity_bytes = 1;
+
+ // The identifier for this volume, generated by the plugin.
+ // This field is REQUIRED.
+ // This field MUST contain enough information to uniquely identify
+ // this specific volume vs all other volumes supported by this plugin.
+ // This field SHALL be used by the CO in subsequent calls to refer to
+ // this volume.
+ // The SP is NOT responsible for global uniqueness of volume_id across
+ // multiple SPs.
+ string volume_id = 2;
+
+ // Opaque static properties of the volume. SP MAY use this field to
+ // ensure subsequent volume validation and publishing calls have
+ // contextual information.
+ // The contents of this field SHALL be opaque to a CO.
+ // The contents of this field SHALL NOT be mutable.
+ // The contents of this field SHALL be safe for the CO to cache.
+ // The contents of this field SHOULD NOT contain sensitive
+ // information.
+ // The contents of this field SHOULD NOT be used for uniquely
+ // identifying a volume. The `volume_id` alone SHOULD be sufficient to
+ // identify the volume.
+ // A volume uniquely identified by `volume_id` SHALL always report the
+ // same volume_context.
+ // This field is OPTIONAL and when present MUST be passed to volume
+ // validation and publishing calls.
+ map<string, string> volume_context = 3;
+
+ // If specified, indicates that the volume is not empty and is
+ // pre-populated with data from the specified source.
+ // This field is OPTIONAL.
+ VolumeContentSource content_source = 4;
+
+ // Specifies where (regions, zones, racks, etc.) the provisioned
+ // volume is accessible from.
+ // A plugin that returns this field MUST also set the
+ // VOLUME_ACCESSIBILITY_CONSTRAINTS plugin capability.
+ // An SP MAY specify multiple topologies to indicate the volume is
+ // accessible from multiple locations. 
+ // COs MAY use this information along with the topology information + // returned by NodeGetInfo to ensure that a given volume is accessible + // from a given node when scheduling workloads. + // This field is OPTIONAL. If it is not specified, the CO MAY assume + // the volume is equally accessible from all nodes in the cluster and + // MAY schedule workloads referencing the volume on any available + // node. + // + // Example 1: + // accessible_topology = {"region": "R1", "zone": "Z2"} + // Indicates a volume accessible only from the "region" "R1" and the + // "zone" "Z2". + // + // Example 2: + // accessible_topology = + // {"region": "R1", "zone": "Z2"}, + // {"region": "R1", "zone": "Z3"} + // Indicates a volume accessible from both "zone" "Z2" and "zone" "Z3" + // in the "region" "R1". + repeated Topology accessible_topology = 5; +} + +message TopologyRequirement { + // Specifies the list of topologies the provisioned volume MUST be + // accessible from. + // This field is OPTIONAL. If TopologyRequirement is specified either + // requisite or preferred or both MUST be specified. + // + // If requisite is specified, the provisioned volume MUST be + // accessible from at least one of the requisite topologies. + // + // Given + // x = number of topologies provisioned volume is accessible from + // n = number of requisite topologies + // The CO MUST ensure n >= 1. The SP MUST ensure x >= 1 + // If x==n, than the SP MUST make the provisioned volume available to + // all topologies from the list of requisite topologies. If it is + // unable to do so, the SP MUST fail the CreateVolume call. + // For example, if a volume should be accessible from a single zone, + // and requisite = + // {"region": "R1", "zone": "Z2"} + // then the provisioned volume MUST be accessible from the "region" + // "R1" and the "zone" "Z2". 
+ // Similarly, if a volume should be accessible from two zones, and
+ // requisite =
+ // {"region": "R1", "zone": "Z2"},
+ // {"region": "R1", "zone": "Z3"}
+ // then the provisioned volume MUST be accessible from the "region"
+ // "R1" and both "zone" "Z2" and "zone" "Z3".
+ //
+ // If x<n, than the SP SHALL choose x unique topologies from the list
+ // of requisite topologies. If it is unable to do so, the SP MUST fail
+ // the CreateVolume call.
+ //
+ // If x>n, than the SP MUST make the provisioned volume available from
+ // all topologies from the list of requisite topologies and MAY choose
+ // the remaining x-n unique topologies from the list of all possible
+ // topologies. If it is unable to do so, the SP MUST fail the
+ // CreateVolume call.
+ // For example, if a volume should be accessible from two zones, and
+ // requisite =
+ // {"region": "R1", "zone": "Z2"}
+ // then the provisioned volume MUST be accessible from the "region"
+ // "R1" and the "zone" "Z2" and the SP may select the second zone
+ // independently, e.g. "R1/Z4".
+ repeated Topology requisite = 1;
+
+ // Specifies the list of topologies the CO would prefer the volume to
+ // be provisioned in.
+ //
+ // This field is OPTIONAL. If TopologyRequirement is specified either
+ // requisite or preferred or both MUST be specified.
+ //
+ // An SP MUST attempt to make the provisioned volume available using
+ // the preferred topologies in order from first to last.
+ //
+ // If requisite is specified, all topologies in preferred list MUST
+ // also be present in the list of requisite topologies.
+ //
+ // If the SP is unable to make the provisioned volume available
+ // from any of the preferred topologies, the SP MAY choose a topology
+ // from the list of requisite topologies.
+ // If the list of requisite topologies is not specified, then the SP
+ // MAY choose from the list of all possible topologies.
+ // If the list of requisite topologies is specified and the SP is
+ // unable to make the provisioned volume available from any of the
+ // requisite topologies it MUST fail the CreateVolume call. 
+  //
+  // Example 1:
+  // Given a volume should be accessible from a single zone, and
+  // requisite =
+  //   {"region": "R1", "zone": "Z2"},
+  //   {"region": "R1", "zone": "Z3"}
+  // preferred =
+  //   {"region": "R1", "zone": "Z3"}
+  // then the SP SHOULD first attempt to make the provisioned volume
+  // available from "zone" "Z3" in the "region" "R1" and fall back to
+  // "zone" "Z2" in the "region" "R1" if that is not possible.
+  //
+  // Example 2:
+  // Given a volume should be accessible from a single zone, and
+  // requisite =
+  //   {"region": "R1", "zone": "Z2"},
+  //   {"region": "R1", "zone": "Z3"},
+  //   {"region": "R1", "zone": "Z4"},
+  //   {"region": "R1", "zone": "Z5"}
+  // preferred =
+  //   {"region": "R1", "zone": "Z4"},
+  //   {"region": "R1", "zone": "Z2"}
+  // then the SP SHOULD first attempt to make the provisioned volume
+  // accessible from "zone" "Z4" in the "region" "R1" and fall back to
+  // "zone" "Z2" in the "region" "R1" if that is not possible. If that
+  // is not possible, the SP may choose between either the "zone"
+  // "Z3" or "Z5" in the "region" "R1".
+  //
+  // Example 3:
+  // Given a volume should be accessible from TWO zones (because an
+  // opaque parameter in CreateVolumeRequest, for example, specifies
+  // the volume is accessible from two zones, aka synchronously
+  // replicated), and
+  // requisite =
+  //   {"region": "R1", "zone": "Z2"},
+  //   {"region": "R1", "zone": "Z3"},
+  //   {"region": "R1", "zone": "Z4"},
+  //   {"region": "R1", "zone": "Z5"}
+  // preferred =
+  //   {"region": "R1", "zone": "Z5"},
+  //   {"region": "R1", "zone": "Z3"}
+  // then the SP SHOULD first attempt to make the provisioned volume
+  // accessible from the combination of the two "zones" "Z5" and "Z3" in
+  // the "region" "R1". If that's not possible, it should fall back to
+  // a combination of "Z5" and other possibilities from the list of
+  // requisite.
If that's not possible, it should fall back to a + // combination of "Z3" and other possibilities from the list of + // requisite. If that's not possible, it should fall back to a + // combination of other possibilities from the list of requisite. + repeated Topology preferred = 2; +} + +// Topology is a map of topological domains to topological segments. +// A topological domain is a sub-division of a cluster, like "region", +// "zone", "rack", etc. +// A topological segment is a specific instance of a topological domain, +// like "zone3", "rack3", etc. +// For example {"com.company/zone": "Z1", "com.company/rack": "R3"} +// Valid keys have two segments: an OPTIONAL prefix and name, separated +// by a slash (/), for example: "com.company.example/zone". +// The key name segment is REQUIRED. The prefix is OPTIONAL. +// The key name MUST be 63 characters or less, begin and end with an +// alphanumeric character ([a-z0-9A-Z]), and contain only dashes (-), +// underscores (_), dots (.), or alphanumerics in between, for example +// "zone". +// The key prefix MUST be 63 characters or less, begin and end with a +// lower-case alphanumeric character ([a-z0-9]), contain only +// dashes (-), dots (.), or lower-case alphanumerics in between, and +// follow domain name notation format +// (https://tools.ietf.org/html/rfc1035#section-2.3.1). +// The key prefix SHOULD include the plugin's host company name and/or +// the plugin name, to minimize the possibility of collisions with keys +// from other plugins. +// If a key prefix is specified, it MUST be identical across all +// topology keys returned by the SP (across all RPCs). +// Keys MUST be case-insensitive. Meaning the keys "Zone" and "zone" +// MUST not both exist. +// Each value (topological segment) MUST contain 1 or more strings. +// Each string MUST be 63 characters or less and begin and end with an +// alphanumeric character with '-', '_', '.', or alphanumerics in +// between. 
+message Topology {
+  map<string, string> segments = 1;
+}
+message DeleteVolumeRequest {
+  // The ID of the volume to be deprovisioned.
+  // This field is REQUIRED.
+  string volume_id = 1;
+
+  // Secrets required by plugin to complete volume deletion request.
+  // This field is OPTIONAL. Refer to the `Secrets Requirements`
+  // section on how to use this field.
+  map<string, string> secrets = 2 [(csi_secret) = true];
+}
+
+message DeleteVolumeResponse {
+  // Intentionally empty.
+}
+message ControllerPublishVolumeRequest {
+  // The ID of the volume to be used on a node.
+  // This field is REQUIRED.
+  string volume_id = 1;
+
+  // The ID of the node. This field is REQUIRED. The CO SHALL set this
+  // field to match the node ID returned by `NodeGetInfo`.
+  string node_id = 2;
+
+  // Volume capability describing how the CO intends to use this volume.
+  // SP MUST ensure the CO can use the published volume as described.
+  // Otherwise SP MUST return the appropriate gRPC error code.
+  // This is a REQUIRED field.
+  VolumeCapability volume_capability = 3;
+
+  // Indicates SP MUST publish the volume in readonly mode.
+  // CO MUST set this field to false if SP does not have the
+  // PUBLISH_READONLY controller capability.
+  // This is a REQUIRED field.
+  bool readonly = 4;
+
+  // Secrets required by plugin to complete controller publish volume
+  // request. This field is OPTIONAL. Refer to the
+  // `Secrets Requirements` section on how to use this field.
+  map<string, string> secrets = 5 [(csi_secret) = true];
+
+  // Volume context as returned by CO in CreateVolumeRequest. This field
+  // is OPTIONAL and MUST match the volume_context of the volume
+  // identified by `volume_id`.
+  map<string, string> volume_context = 6;
+}
+
+message ControllerPublishVolumeResponse {
+  // Opaque static publish properties of the volume. SP MAY use this
+  // field to ensure subsequent `NodeStageVolume` or `NodePublishVolume`
+  // calls have contextual information.
+  // The contents of this field SHALL be opaque to a CO.
+  // The contents of this field SHALL NOT be mutable.
+  // The contents of this field SHALL be safe for the CO to cache.
+  // The contents of this field SHOULD NOT contain sensitive
+  // information.
+  // The contents of this field SHOULD NOT be used for uniquely
+  // identifying a volume. The `volume_id` alone SHOULD be sufficient to
+  // identify the volume.
+  // This field is OPTIONAL and when present MUST be passed to
+  // subsequent `NodeStageVolume` or `NodePublishVolume` calls
+  map<string, string> publish_context = 1;
+}
+message ControllerUnpublishVolumeRequest {
+  // The ID of the volume. This field is REQUIRED.
+  string volume_id = 1;
+
+  // The ID of the node. This field is OPTIONAL. The CO SHOULD set this
+  // field to match the node ID returned by `NodeGetInfo` or leave it
+  // unset. If the value is set, the SP MUST unpublish the volume from
+  // the specified node. If the value is unset, the SP MUST unpublish
+  // the volume from all nodes it is published to.
+  string node_id = 2;
+
+  // Secrets required by plugin to complete controller unpublish volume
+  // request. This SHOULD be the same secrets passed to the
+  // ControllerPublishVolume call for the specified volume.
+  // This field is OPTIONAL. Refer to the `Secrets Requirements`
+  // section on how to use this field.
+  map<string, string> secrets = 3 [(csi_secret) = true];
+}
+
+message ControllerUnpublishVolumeResponse {
+  // Intentionally empty.
+}
+message ValidateVolumeCapabilitiesRequest {
+  // The ID of the volume to check. This field is REQUIRED.
+  string volume_id = 1;
+
+  // Volume context as returned by CO in CreateVolumeRequest. This field
+  // is OPTIONAL and MUST match the volume_context of the volume
+  // identified by `volume_id`.
+  map<string, string> volume_context = 2;
+
+  // The capabilities that the CO wants to check for the volume. This
+  // call SHALL return "confirmed" only if all the volume capabilities
+  // specified below are supported. This field is REQUIRED.
+  repeated VolumeCapability volume_capabilities = 3;
+
+  // See CreateVolumeRequest.parameters.
+  // This field is OPTIONAL.
+  map<string, string> parameters = 4;
+
+  // Secrets required by plugin to complete volume validation request.
+  // This field is OPTIONAL. Refer to the `Secrets Requirements`
+  // section on how to use this field.
+  map<string, string> secrets = 5 [(csi_secret) = true];
+}
+
+message ValidateVolumeCapabilitiesResponse {
+  message Confirmed {
+    // Volume context validated by the plugin.
+    // This field is OPTIONAL.
+    map<string, string> volume_context = 1;
+
+    // Volume capabilities supported by the plugin.
+    // This field is REQUIRED.
+    repeated VolumeCapability volume_capabilities = 2;
+
+    // The volume creation parameters validated by the plugin.
+    // This field is OPTIONAL.
+    map<string, string> parameters = 3;
+  }
+
+  // Confirmed indicates to the CO the set of capabilities that the
+  // plugin has validated. This field SHALL only be set to a non-empty
+  // value for successful validation responses.
+  // For successful validation responses, the CO SHALL compare the
+  // fields of this message to the originally requested capabilities in
+  // order to guard against an older plugin reporting "valid" for newer
+  // capability fields that it does not yet understand.
+  // This field is OPTIONAL.
+  Confirmed confirmed = 1;
+
+  // Message to the CO if `confirmed` above is empty. This field is
+  // OPTIONAL.
+  // An empty string is equal to an unspecified field value.
+  string message = 2;
+}
+message ListVolumesRequest {
+  // If specified (non-zero value), the Plugin MUST NOT return more
+  // entries than this number in the response. If the actual number of
+  // entries is more than this number, the Plugin MUST set `next_token`
+  // in the response which can be used to get the next page of entries
+  // in the subsequent `ListVolumes` call. This field is OPTIONAL. If
+  // not specified (zero value), it means there is no restriction on the
+  // number of entries that can be returned.
+ // The value of this field MUST NOT be negative. + int32 max_entries = 1; + + // A token to specify where to start paginating. Set this field to + // `next_token` returned by a previous `ListVolumes` call to get the + // next page of entries. This field is OPTIONAL. + // An empty string is equal to an unspecified field value. + string starting_token = 2; +} + +message ListVolumesResponse { + message Entry { + Volume volume = 1; + } + + repeated Entry entries = 1; + + // This token allows you to get the next page of entries for + // `ListVolumes` request. If the number of entries is larger than + // `max_entries`, use the `next_token` as a value for the + // `starting_token` field in the next `ListVolumes` request. This + // field is OPTIONAL. + // An empty string is equal to an unspecified field value. + string next_token = 2; +} +message GetCapacityRequest { + // If specified, the Plugin SHALL report the capacity of the storage + // that can be used to provision volumes that satisfy ALL of the + // specified `volume_capabilities`. These are the same + // `volume_capabilities` the CO will use in `CreateVolumeRequest`. + // This field is OPTIONAL. + repeated VolumeCapability volume_capabilities = 1; + + // If specified, the Plugin SHALL report the capacity of the storage + // that can be used to provision volumes with the given Plugin + // specific `parameters`. These are the same `parameters` the CO will + // use in `CreateVolumeRequest`. This field is OPTIONAL. + map parameters = 2; + + // If specified, the Plugin SHALL report the capacity of the storage + // that can be used to provision volumes that in the specified + // `accessible_topology`. This is the same as the + // `accessible_topology` the CO returns in a `CreateVolumeResponse`. + // This field is OPTIONAL. This field SHALL NOT be set unless the + // plugin advertises the VOLUME_ACCESSIBILITY_CONSTRAINTS capability. 
+ Topology accessible_topology = 3; +} + +message GetCapacityResponse { + // The available capacity, in bytes, of the storage that can be used + // to provision volumes. If `volume_capabilities` or `parameters` is + // specified in the request, the Plugin SHALL take those into + // consideration when calculating the available capacity of the + // storage. This field is REQUIRED. + // The value of this field MUST NOT be negative. + int64 available_capacity = 1; +} +message ControllerGetCapabilitiesRequest { + // Intentionally empty. +} + +message ControllerGetCapabilitiesResponse { + // All the capabilities that the controller service supports. This + // field is OPTIONAL. + repeated ControllerServiceCapability capabilities = 1; +} + +// Specifies a capability of the controller service. +message ControllerServiceCapability { + message RPC { + enum Type { + UNKNOWN = 0; + CREATE_DELETE_VOLUME = 1; + PUBLISH_UNPUBLISH_VOLUME = 2; + LIST_VOLUMES = 3; + GET_CAPACITY = 4; + // Currently the only way to consume a snapshot is to create + // a volume from it. Therefore plugins supporting + // CREATE_DELETE_SNAPSHOT MUST support creating volume from + // snapshot. + CREATE_DELETE_SNAPSHOT = 5; + LIST_SNAPSHOTS = 6; + // Plugins supporting volume cloning at the storage level MAY + // report this capability. The source volume MUST be managed by + // the same plugin. Not all volume sources and parameters + // combinations MAY work. + CLONE_VOLUME = 7; + // Indicates the SP supports ControllerPublishVolume.readonly + // field. + PUBLISH_READONLY = 8; + } + + Type type = 1; + } + + oneof type { + // RPC that the controller supports. + RPC rpc = 1; + } +} +message CreateSnapshotRequest { + // The ID of the source volume to be snapshotted. + // This field is REQUIRED. + string source_volume_id = 1; + + // The suggested name for the snapshot. This field is REQUIRED for + // idempotency. 
+ // Any Unicode string that conforms to the length limit is allowed + // except those containing the following banned characters: + // U+0000-U+0008, U+000B, U+000C, U+000E-U+001F, U+007F-U+009F. + // (These are control characters other than commonly used whitespace.) + string name = 2; + + // Secrets required by plugin to complete snapshot creation request. + // This field is OPTIONAL. Refer to the `Secrets Requirements` + // section on how to use this field. + map secrets = 3 [(csi_secret) = true]; + + // Plugin specific parameters passed in as opaque key-value pairs. + // This field is OPTIONAL. The Plugin is responsible for parsing and + // validating these parameters. COs will treat these as opaque. + // Use cases for opaque parameters: + // - Specify a policy to automatically clean up the snapshot. + // - Specify an expiration date for the snapshot. + // - Specify whether the snapshot is readonly or read/write. + // - Specify if the snapshot should be replicated to some place. + // - Specify primary or secondary for replication systems that + // support snapshotting only on primary. + map parameters = 4; +} + +message CreateSnapshotResponse { + // Contains all attributes of the newly created snapshot that are + // relevant to the CO along with information required by the Plugin + // to uniquely identify the snapshot. This field is REQUIRED. + Snapshot snapshot = 1; +} + +// Information about a specific snapshot. +message Snapshot { + // This is the complete size of the snapshot in bytes. The purpose of + // this field is to give CO guidance on how much space is needed to + // create a volume from this snapshot. The size of the volume MUST NOT + // be less than the size of the source snapshot. This field is + // OPTIONAL. If this field is not set, it indicates that this size is + // unknown. The value of this field MUST NOT be negative and a size of + // zero means it is unspecified. 
+ int64 size_bytes = 1; + + // The identifier for this snapshot, generated by the plugin. + // This field is REQUIRED. + // This field MUST contain enough information to uniquely identify + // this specific snapshot vs all other snapshots supported by this + // plugin. + // This field SHALL be used by the CO in subsequent calls to refer to + // this snapshot. + // The SP is NOT responsible for global uniqueness of snapshot_id + // across multiple SPs. + string snapshot_id = 2; + + // Identity information for the source volume. Note that creating a + // snapshot from a snapshot is not supported here so the source has to + // be a volume. This field is REQUIRED. + string source_volume_id = 3; + + // Timestamp when the point-in-time snapshot is taken on the storage + // system. This field is REQUIRED. + .google.protobuf.Timestamp creation_time = 4; + + // Indicates if a snapshot is ready to use as a + // `volume_content_source` in a `CreateVolumeRequest`. The default + // value is false. This field is REQUIRED. + bool ready_to_use = 5; +} +message DeleteSnapshotRequest { + // The ID of the snapshot to be deleted. + // This field is REQUIRED. + string snapshot_id = 1; + + // Secrets required by plugin to complete snapshot deletion request. + // This field is OPTIONAL. Refer to the `Secrets Requirements` + // section on how to use this field. + map secrets = 2 [(csi_secret) = true]; +} + +message DeleteSnapshotResponse {} +// List all snapshots on the storage system regardless of how they were +// created. +message ListSnapshotsRequest { + // If specified (non-zero value), the Plugin MUST NOT return more + // entries than this number in the response. If the actual number of + // entries is more than this number, the Plugin MUST set `next_token` + // in the response which can be used to get the next page of entries + // in the subsequent `ListSnapshots` call. This field is OPTIONAL. 
If + // not specified (zero value), it means there is no restriction on the + // number of entries that can be returned. + // The value of this field MUST NOT be negative. + int32 max_entries = 1; + + // A token to specify where to start paginating. Set this field to + // `next_token` returned by a previous `ListSnapshots` call to get the + // next page of entries. This field is OPTIONAL. + // An empty string is equal to an unspecified field value. + string starting_token = 2; + + // Identity information for the source volume. This field is OPTIONAL. + // It can be used to list snapshots by volume. + string source_volume_id = 3; + + // Identity information for a specific snapshot. This field is + // OPTIONAL. It can be used to list only a specific snapshot. + // ListSnapshots will return with current snapshot information + // and will not block if the snapshot is being processed after + // it is cut. + string snapshot_id = 4; +} + +message ListSnapshotsResponse { + message Entry { + Snapshot snapshot = 1; + } + + repeated Entry entries = 1; + + // This token allows you to get the next page of entries for + // `ListSnapshots` request. If the number of entries is larger than + // `max_entries`, use the `next_token` as a value for the + // `starting_token` field in the next `ListSnapshots` request. This + // field is OPTIONAL. + // An empty string is equal to an unspecified field value. + string next_token = 2; +} +message NodeStageVolumeRequest { + // The ID of the volume to publish. This field is REQUIRED. + string volume_id = 1; + + // The CO SHALL set this field to the value returned by + // `ControllerPublishVolume` if the corresponding Controller Plugin + // has `PUBLISH_UNPUBLISH_VOLUME` controller capability, and SHALL be + // left unset if the corresponding Controller Plugin does not have + // this capability. This is an OPTIONAL field. + map publish_context = 2; + + // The path to which the volume MAY be staged. 
It MUST be an + // absolute path in the root filesystem of the process serving this + // request, and MUST be a directory. The CO SHALL ensure that there + // is only one `staging_target_path` per volume. The CO SHALL ensure + // that the path is directory and that the process serving the + // request has `read` and `write` permission to that directory. The + // CO SHALL be responsible for creating the directory if it does not + // exist. + // This is a REQUIRED field. + string staging_target_path = 3; + + // Volume capability describing how the CO intends to use this volume. + // SP MUST ensure the CO can use the staged volume as described. + // Otherwise SP MUST return the appropriate gRPC error code. + // This is a REQUIRED field. + VolumeCapability volume_capability = 4; + + // Secrets required by plugin to complete node stage volume request. + // This field is OPTIONAL. Refer to the `Secrets Requirements` + // section on how to use this field. + map secrets = 5 [(csi_secret) = true]; + + // Volume context as returned by CO in CreateVolumeRequest. This field + // is OPTIONAL and MUST match the volume_context of the volume + // identified by `volume_id`. + map volume_context = 6; +} + +message NodeStageVolumeResponse { + // Intentionally empty. +} +message NodeUnstageVolumeRequest { + // The ID of the volume. This field is REQUIRED. + string volume_id = 1; + + // The path at which the volume was staged. It MUST be an absolute + // path in the root filesystem of the process serving this request. + // This is a REQUIRED field. + string staging_target_path = 2; +} + +message NodeUnstageVolumeResponse { + // Intentionally empty. +} +message NodePublishVolumeRequest { + // The ID of the volume to publish. This field is REQUIRED. 
+ string volume_id = 1; + + // The CO SHALL set this field to the value returned by + // `ControllerPublishVolume` if the corresponding Controller Plugin + // has `PUBLISH_UNPUBLISH_VOLUME` controller capability, and SHALL be + // left unset if the corresponding Controller Plugin does not have + // this capability. This is an OPTIONAL field. + map publish_context = 2; + + // The path to which the volume was staged by `NodeStageVolume`. + // It MUST be an absolute path in the root filesystem of the process + // serving this request. + // It MUST be set if the Node Plugin implements the + // `STAGE_UNSTAGE_VOLUME` node capability. + // This is an OPTIONAL field. + string staging_target_path = 3; + + // The path to which the volume will be published. It MUST be an + // absolute path in the root filesystem of the process serving this + // request. The CO SHALL ensure uniqueness of target_path per volume. + // The CO SHALL ensure that the parent directory of this path exists + // and that the process serving the request has `read` and `write` + // permissions to that parent directory. + // For volumes with an access type of block, the SP SHALL place the + // block device at target_path. + // For volumes with an access type of mount, the SP SHALL place the + // mounted directory at target_path. + // Creation of target_path is the responsibility of the SP. + // This is a REQUIRED field. + string target_path = 4; + + // Volume capability describing how the CO intends to use this volume. + // SP MUST ensure the CO can use the published volume as described. + // Otherwise SP MUST return the appropriate gRPC error code. + // This is a REQUIRED field. + VolumeCapability volume_capability = 5; + + // Indicates SP MUST publish the volume in readonly mode. + // This field is REQUIRED. + bool readonly = 6; + + // Secrets required by plugin to complete node publish volume request. + // This field is OPTIONAL. 
Refer to the `Secrets Requirements` + // section on how to use this field. + map secrets = 7 [(csi_secret) = true]; + + // Volume context as returned by CO in CreateVolumeRequest. This field + // is OPTIONAL and MUST match the volume_context of the volume + // identified by `volume_id`. + map volume_context = 8; +} + +message NodePublishVolumeResponse { + // Intentionally empty. +} +message NodeUnpublishVolumeRequest { + // The ID of the volume. This field is REQUIRED. + string volume_id = 1; + + // The path at which the volume was published. It MUST be an absolute + // path in the root filesystem of the process serving this request. + // The SP MUST delete the file or directory it created at this path. + // This is a REQUIRED field. + string target_path = 2; +} + +message NodeUnpublishVolumeResponse { + // Intentionally empty. +} +message NodeGetVolumeStatsRequest { + // The ID of the volume. This field is REQUIRED. + string volume_id = 1; + + // It can be any valid path where volume was previously + // staged or published. + // It MUST be an absolute path in the root filesystem of + // the process serving this request. + // This is a REQUIRED field. + string volume_path = 2; +} + +message NodeGetVolumeStatsResponse { + // This field is OPTIONAL. + repeated VolumeUsage usage = 1; +} + +message VolumeUsage { + enum Unit { + UNKNOWN = 0; + BYTES = 1; + INODES = 2; + } + // The available capacity in specified Unit. This field is OPTIONAL. + // The value of this field MUST NOT be negative. + int64 available = 1; + + // The total capacity in specified Unit. This field is REQUIRED. + // The value of this field MUST NOT be negative. + int64 total = 2; + + // The used capacity in specified Unit. This field is OPTIONAL. + // The value of this field MUST NOT be negative. + int64 used = 3; + + // Units by which values are measured. This field is REQUIRED. + Unit unit = 4; +} +message NodeGetCapabilitiesRequest { + // Intentionally empty. 
+} + +message NodeGetCapabilitiesResponse { + // All the capabilities that the node service supports. This field + // is OPTIONAL. + repeated NodeServiceCapability capabilities = 1; +} + +// Specifies a capability of the node service. +message NodeServiceCapability { + message RPC { + enum Type { + UNKNOWN = 0; + STAGE_UNSTAGE_VOLUME = 1; + // If Plugin implements GET_VOLUME_STATS capability + // then it MUST implement NodeGetVolumeStats RPC + // call for fetching volume statistics. + GET_VOLUME_STATS = 2; + } + + Type type = 1; + } + + oneof type { + // RPC that the controller supports. + RPC rpc = 1; + } +} +message NodeGetInfoRequest { +} + +message NodeGetInfoResponse { + // The identifier of the node as understood by the SP. + // This field is REQUIRED. + // This field MUST contain enough information to uniquely identify + // this specific node vs all other nodes supported by this plugin. + // This field SHALL be used by the CO in subsequent calls, including + // `ControllerPublishVolume`, to refer to this node. + // The SP is NOT responsible for global uniqueness of node_id across + // multiple SPs. + string node_id = 1; + + // Maximum number of volumes that controller can publish to the node. + // If value is not set or zero CO SHALL decide how many volumes of + // this type can be published by the controller to the node. The + // plugin MUST NOT set negative values here. + // This field is OPTIONAL. + int64 max_volumes_per_node = 2; + + // Specifies where (regions, zones, racks, etc.) the node is + // accessible from. + // A plugin that returns this field MUST also set the + // VOLUME_ACCESSIBILITY_CONSTRAINTS plugin capability. + // COs MAY use this information along with the topology information + // returned in CreateVolumeResponse to ensure that a given volume is + // accessible from a given node when scheduling workloads. + // This field is OPTIONAL. 
If it is not specified, the CO MAY assume + // the node is not subject to any topological constraint, and MAY + // schedule workloads that reference any volume V, such that there are + // no topological constraints declared for V. + // + // Example 1: + // accessible_topology = + // {"region": "R1", "zone": "R2"} + // Indicates the node exists within the "region" "R1" and the "zone" + // "Z2". + Topology accessible_topology = 3; +} diff --git a/csi_proto/csi-v1.1.0.proto b/csi_proto/csi-v1.1.0.proto new file mode 100644 index 0000000..4c4d225 --- /dev/null +++ b/csi_proto/csi-v1.1.0.proto @@ -0,0 +1,1306 @@ +// Code generated by make; DO NOT EDIT. +syntax = "proto3"; +package csi.v1; + +import "google/protobuf/descriptor.proto"; +import "google/protobuf/timestamp.proto"; +import "google/protobuf/wrappers.proto"; + +option go_package = "csi"; + +extend google.protobuf.FieldOptions { + // Indicates that a field MAY contain information that is sensitive + // and MUST be treated as such (e.g. not logged). 
+ bool csi_secret = 1059; +} +service Identity { + rpc GetPluginInfo(GetPluginInfoRequest) + returns (GetPluginInfoResponse) {} + + rpc GetPluginCapabilities(GetPluginCapabilitiesRequest) + returns (GetPluginCapabilitiesResponse) {} + + rpc Probe (ProbeRequest) + returns (ProbeResponse) {} +} + +service Controller { + rpc CreateVolume (CreateVolumeRequest) + returns (CreateVolumeResponse) {} + + rpc DeleteVolume (DeleteVolumeRequest) + returns (DeleteVolumeResponse) {} + + rpc ControllerPublishVolume (ControllerPublishVolumeRequest) + returns (ControllerPublishVolumeResponse) {} + + rpc ControllerUnpublishVolume (ControllerUnpublishVolumeRequest) + returns (ControllerUnpublishVolumeResponse) {} + + rpc ValidateVolumeCapabilities (ValidateVolumeCapabilitiesRequest) + returns (ValidateVolumeCapabilitiesResponse) {} + + rpc ListVolumes (ListVolumesRequest) + returns (ListVolumesResponse) {} + + rpc GetCapacity (GetCapacityRequest) + returns (GetCapacityResponse) {} + + rpc ControllerGetCapabilities (ControllerGetCapabilitiesRequest) + returns (ControllerGetCapabilitiesResponse) {} + + rpc CreateSnapshot (CreateSnapshotRequest) + returns (CreateSnapshotResponse) {} + + rpc DeleteSnapshot (DeleteSnapshotRequest) + returns (DeleteSnapshotResponse) {} + + rpc ListSnapshots (ListSnapshotsRequest) + returns (ListSnapshotsResponse) {} + + rpc ControllerExpandVolume (ControllerExpandVolumeRequest) + returns (ControllerExpandVolumeResponse) {} +} + +service Node { + rpc NodeStageVolume (NodeStageVolumeRequest) + returns (NodeStageVolumeResponse) {} + + rpc NodeUnstageVolume (NodeUnstageVolumeRequest) + returns (NodeUnstageVolumeResponse) {} + + rpc NodePublishVolume (NodePublishVolumeRequest) + returns (NodePublishVolumeResponse) {} + + rpc NodeUnpublishVolume (NodeUnpublishVolumeRequest) + returns (NodeUnpublishVolumeResponse) {} + + rpc NodeGetVolumeStats (NodeGetVolumeStatsRequest) + returns (NodeGetVolumeStatsResponse) {} + + + rpc NodeExpandVolume(NodeExpandVolumeRequest) 
+ returns (NodeExpandVolumeResponse) {} + + + rpc NodeGetCapabilities (NodeGetCapabilitiesRequest) + returns (NodeGetCapabilitiesResponse) {} + + rpc NodeGetInfo (NodeGetInfoRequest) + returns (NodeGetInfoResponse) {} +} +message GetPluginInfoRequest { + // Intentionally empty. +} + +message GetPluginInfoResponse { + // The name MUST follow domain name notation format + // (https://tools.ietf.org/html/rfc1035#section-2.3.1). It SHOULD + // include the plugin's host company name and the plugin name, + // to minimize the possibility of collisions. It MUST be 63 + // characters or less, beginning and ending with an alphanumeric + // character ([a-z0-9A-Z]) with dashes (-), dots (.), and + // alphanumerics between. This field is REQUIRED. + string name = 1; + + // This field is REQUIRED. Value of this field is opaque to the CO. + string vendor_version = 2; + + // This field is OPTIONAL. Values are opaque to the CO. + map manifest = 3; +} +message GetPluginCapabilitiesRequest { + // Intentionally empty. +} + +message GetPluginCapabilitiesResponse { + // All the capabilities that the controller service supports. This + // field is OPTIONAL. + repeated PluginCapability capabilities = 1; +} + +// Specifies a capability of the plugin. +message PluginCapability { + message Service { + enum Type { + UNKNOWN = 0; + // CONTROLLER_SERVICE indicates that the Plugin provides RPCs for + // the ControllerService. Plugins SHOULD provide this capability. + // In rare cases certain plugins MAY wish to omit the + // ControllerService entirely from their implementation, but such + // SHOULD NOT be the common case. + // The presence of this capability determines whether the CO will + // attempt to invoke the REQUIRED ControllerService RPCs, as well + // as specific RPCs as indicated by ControllerGetCapabilities. + CONTROLLER_SERVICE = 1; + + // VOLUME_ACCESSIBILITY_CONSTRAINTS indicates that the volumes for + // this plugin MAY NOT be equally accessible by all nodes in the + // cluster. 
The CO MUST use the topology information returned by + // CreateVolumeRequest along with the topology information + // returned by NodeGetInfo to ensure that a given volume is + // accessible from a given node when scheduling workloads. + VOLUME_ACCESSIBILITY_CONSTRAINTS = 2; + } + Type type = 1; + } + + message VolumeExpansion { + enum Type { + UNKNOWN = 0; + + // ONLINE indicates that volumes may be expanded when published to + // a node. When a Plugin implements this capability it MUST + // implement either the EXPAND_VOLUME controller capability or the + // EXPAND_VOLUME node capability or both. When a plugin supports + // ONLINE volume expansion and also has the EXPAND_VOLUME + // controller capability then the plugin MUST support expansion of + // volumes currently published and available on a node. When a + // plugin supports ONLINE volume expansion and also has the + // EXPAND_VOLUME node capability then the plugin MAY support + // expansion of node-published volume via NodeExpandVolume. + // + // Example 1: Given a shared filesystem volume (e.g. GlusterFs), + // the Plugin may set the ONLINE volume expansion capability and + // implement ControllerExpandVolume but not NodeExpandVolume. + // + // Example 2: Given a block storage volume type (e.g. EBS), the + // Plugin may set the ONLINE volume expansion capability and + // implement both ControllerExpandVolume and NodeExpandVolume. + // + // Example 3: Given a Plugin that supports volume expansion only + // upon a node, the Plugin may set the ONLINE volume + // expansion capability and implement NodeExpandVolume but not + // ControllerExpandVolume. + ONLINE = 1; + + // OFFLINE indicates that volumes currently published and + // available on a node SHALL NOT be expanded via + // ControllerExpandVolume. 
When a plugin supports OFFLINE volume + // expansion it MUST implement either the EXPAND_VOLUME controller + // capability or both the EXPAND_VOLUME controller capability and + // the EXPAND_VOLUME node capability. + // + // Example 1: Given a block storage volume type (e.g. Azure Disk) + // that does not support expansion of "node-attached" (i.e. + // controller-published) volumes, the Plugin may indicate + // OFFLINE volume expansion support and implement both + // ControllerExpandVolume and NodeExpandVolume. + OFFLINE = 2; + } + Type type = 1; + } + + oneof type { + // Service that the plugin supports. + Service service = 1; + VolumeExpansion volume_expansion = 2; + } +} +message ProbeRequest { + // Intentionally empty. +} + +message ProbeResponse { + // Readiness allows a plugin to report its initialization status back + // to the CO. Initialization for some plugins MAY be time consuming + // and it is important for a CO to distinguish between the following + // cases: + // + // 1) The plugin is in an unhealthy state and MAY need restarting. In + // this case a gRPC error code SHALL be returned. + // 2) The plugin is still initializing, but is otherwise perfectly + // healthy. In this case a successful response SHALL be returned + // with a readiness value of `false`. Calls to the plugin's + // Controller and/or Node services MAY fail due to an incomplete + // initialization state. + // 3) The plugin has finished initializing and is ready to service + // calls to its Controller and/or Node services. A successful + // response is returned with a readiness value of `true`. + // + // This field is OPTIONAL. If not present, the caller SHALL assume + // that the plugin is in a ready state and is accepting calls to its + // Controller and/or Node services (according to the plugin's reported + // capabilities). + .google.protobuf.BoolValue ready = 1; +} +message CreateVolumeRequest { + // The suggested name for the storage space. This field is REQUIRED. 
+ // It serves two purposes: + // 1) Idempotency - This name is generated by the CO to achieve + // idempotency. The Plugin SHOULD ensure that multiple + // `CreateVolume` calls for the same name do not result in more + // than one piece of storage provisioned corresponding to that + // name. If a Plugin is unable to enforce idempotency, the CO's + // error recovery logic could result in multiple (unused) volumes + // being provisioned. + // In the case of error, the CO MUST handle the gRPC error codes + // per the recovery behavior defined in the "CreateVolume Errors" + // section below. + // The CO is responsible for cleaning up volumes it provisioned + // that it no longer needs. If the CO is uncertain whether a volume + // was provisioned or not when a `CreateVolume` call fails, the CO + // MAY call `CreateVolume` again, with the same name, to ensure the + // volume exists and to retrieve the volume's `volume_id` (unless + // otherwise prohibited by "CreateVolume Errors"). + // 2) Suggested name - Some storage systems allow callers to specify + // an identifier by which to refer to the newly provisioned + // storage. If a storage system supports this, it can optionally + // use this name as the identifier for the new volume. + // Any Unicode string that conforms to the length limit is allowed + // except those containing the following banned characters: + // U+0000-U+0008, U+000B, U+000C, U+000E-U+001F, U+007F-U+009F. + // (These are control characters other than commonly used whitespace.) + string name = 1; + + // This field is OPTIONAL. This allows the CO to specify the capacity + // requirement of the volume to be provisioned. If not specified, the + // Plugin MAY choose an implementation-defined capacity range. If + // specified it MUST always be honored, even when creating volumes + // from a source; which MAY force some backends to internally extend + // the volume after creating it. 
+ CapacityRange capacity_range = 2;
+
+ // The capabilities that the provisioned volume MUST have. SP MUST
+ // provision a volume that will satisfy ALL of the capabilities
+ // specified in this list. Otherwise SP MUST return the appropriate
+ // gRPC error code.
+ // The Plugin MUST assume that the CO MAY use the provisioned volume
+ // with ANY of the capabilities specified in this list.
+ // For example, a CO MAY specify two volume capabilities: one with
+ // access mode SINGLE_NODE_WRITER and another with access mode
+ // MULTI_NODE_READER_ONLY. In this case, the SP MUST verify that the
+ // provisioned volume can be used in either mode.
+ // This also enables the CO to do early validation: If ANY of the
+ // specified volume capabilities are not supported by the SP, the call
+ // MUST return the appropriate gRPC error code.
+ // This field is REQUIRED.
+ repeated VolumeCapability volume_capabilities = 3;
+
+ // Plugin specific parameters passed in as opaque key-value pairs.
+ // This field is OPTIONAL. The Plugin is responsible for parsing and
+ // validating these parameters. COs will treat these as opaque.
+ map<string, string> parameters = 4;
+
+ // Secrets required by plugin to complete volume creation request.
+ // This field is OPTIONAL. Refer to the `Secrets Requirements`
+ // section on how to use this field.
+ map<string, string> secrets = 5 [(csi_secret) = true];
+
+ // If specified, the new volume will be pre-populated with data from
+ // this source. This field is OPTIONAL.
+ VolumeContentSource volume_content_source = 6;
+
+ // Specifies where (regions, zones, racks, etc.) the provisioned
+ // volume MUST be accessible from.
+ // An SP SHALL advertise the requirements for topological
+ // accessibility information in documentation. COs SHALL only specify
+ // topological accessibility information supported by the SP.
+ // This field is OPTIONAL.
+ // This field SHALL NOT be specified unless the SP has the
+ // VOLUME_ACCESSIBILITY_CONSTRAINTS plugin capability.
+ // If this field is not specified and the SP has the + // VOLUME_ACCESSIBILITY_CONSTRAINTS plugin capability, the SP MAY + // choose where the provisioned volume is accessible from. + TopologyRequirement accessibility_requirements = 7; +} + +// Specifies what source the volume will be created from. One of the +// type fields MUST be specified. +message VolumeContentSource { + message SnapshotSource { + // Contains identity information for the existing source snapshot. + // This field is REQUIRED. Plugin is REQUIRED to support creating + // volume from snapshot if it supports the capability + // CREATE_DELETE_SNAPSHOT. + string snapshot_id = 1; + } + + message VolumeSource { + // Contains identity information for the existing source volume. + // This field is REQUIRED. Plugins reporting CLONE_VOLUME + // capability MUST support creating a volume from another volume. + string volume_id = 1; + } + + oneof type { + SnapshotSource snapshot = 1; + VolumeSource volume = 2; + } +} + +message CreateVolumeResponse { + // Contains all attributes of the newly created volume that are + // relevant to the CO along with information required by the Plugin + // to uniquely identify the volume. This field is REQUIRED. + Volume volume = 1; +} + +// Specify a capability of a volume. +message VolumeCapability { + // Indicate that the volume will be accessed via the block device API. + message BlockVolume { + // Intentionally empty, for now. + } + + // Indicate that the volume will be accessed via the filesystem API. + message MountVolume { + // The filesystem type. This field is OPTIONAL. + // An empty string is equal to an unspecified field value. + string fs_type = 1; + + // The mount options that can be used for the volume. This field is + // OPTIONAL. `mount_flags` MAY contain sensitive information. + // Therefore, the CO and the Plugin MUST NOT leak this information + // to untrusted entities. The total size of this repeated field + // SHALL NOT exceed 4 KiB. 
+ repeated string mount_flags = 2; + } + + // Specify how a volume can be accessed. + message AccessMode { + enum Mode { + UNKNOWN = 0; + + // Can only be published once as read/write on a single node, at + // any given time. + SINGLE_NODE_WRITER = 1; + + // Can only be published once as readonly on a single node, at + // any given time. + SINGLE_NODE_READER_ONLY = 2; + + // Can be published as readonly at multiple nodes simultaneously. + MULTI_NODE_READER_ONLY = 3; + + // Can be published at multiple nodes simultaneously. Only one of + // the node can be used as read/write. The rest will be readonly. + MULTI_NODE_SINGLE_WRITER = 4; + + // Can be published as read/write at multiple nodes + // simultaneously. + MULTI_NODE_MULTI_WRITER = 5; + } + + // This field is REQUIRED. + Mode mode = 1; + } + + // Specifies what API the volume will be accessed using. One of the + // following fields MUST be specified. + oneof access_type { + BlockVolume block = 1; + MountVolume mount = 2; + } + + // This is a REQUIRED field. + AccessMode access_mode = 3; +} + +// The capacity of the storage space in bytes. To specify an exact size, +// `required_bytes` and `limit_bytes` SHALL be set to the same value. At +// least one of the these fields MUST be specified. +message CapacityRange { + // Volume MUST be at least this big. This field is OPTIONAL. + // A value of 0 is equal to an unspecified field value. + // The value of this field MUST NOT be negative. + int64 required_bytes = 1; + + // Volume MUST not be bigger than this. This field is OPTIONAL. + // A value of 0 is equal to an unspecified field value. + // The value of this field MUST NOT be negative. + int64 limit_bytes = 2; +} + +// Information about a specific volume. +message Volume { + // The capacity of the volume in bytes. This field is OPTIONAL. If not + // set (value of 0), it indicates that the capacity of the volume is + // unknown (e.g., NFS share). + // The value of this field MUST NOT be negative. 
+ int64 capacity_bytes = 1;
+
+ // The identifier for this volume, generated by the plugin.
+ // This field is REQUIRED.
+ // This field MUST contain enough information to uniquely identify
+ // this specific volume vs all other volumes supported by this plugin.
+ // This field SHALL be used by the CO in subsequent calls to refer to
+ // this volume.
+ // The SP is NOT responsible for global uniqueness of volume_id across
+ // multiple SPs.
+ string volume_id = 2;
+
+ // Opaque static properties of the volume. SP MAY use this field to
+ // ensure subsequent volume validation and publishing calls have
+ // contextual information.
+ // The contents of this field SHALL be opaque to a CO.
+ // The contents of this field SHALL NOT be mutable.
+ // The contents of this field SHALL be safe for the CO to cache.
+ // The contents of this field SHOULD NOT contain sensitive
+ // information.
+ // The contents of this field SHOULD NOT be used for uniquely
+ // identifying a volume. The `volume_id` alone SHOULD be sufficient to
+ // identify the volume.
+ // A volume uniquely identified by `volume_id` SHALL always report the
+ // same volume_context.
+ // This field is OPTIONAL and when present MUST be passed to volume
+ // validation and publishing calls.
+ map<string, string> volume_context = 3;
+
+ // If specified, indicates that the volume is not empty and is
+ // pre-populated with data from the specified source.
+ // This field is OPTIONAL.
+ VolumeContentSource content_source = 4;
+
+ // Specifies where (regions, zones, racks, etc.) the provisioned
+ // volume is accessible from.
+ // A plugin that returns this field MUST also set the
+ // VOLUME_ACCESSIBILITY_CONSTRAINTS plugin capability.
+ // An SP MAY specify multiple topologies to indicate the volume is
+ // accessible from multiple locations.
+ // COs MAY use this information along with the topology information + // returned by NodeGetInfo to ensure that a given volume is accessible + // from a given node when scheduling workloads. + // This field is OPTIONAL. If it is not specified, the CO MAY assume + // the volume is equally accessible from all nodes in the cluster and + // MAY schedule workloads referencing the volume on any available + // node. + // + // Example 1: + // accessible_topology = {"region": "R1", "zone": "Z2"} + // Indicates a volume accessible only from the "region" "R1" and the + // "zone" "Z2". + // + // Example 2: + // accessible_topology = + // {"region": "R1", "zone": "Z2"}, + // {"region": "R1", "zone": "Z3"} + // Indicates a volume accessible from both "zone" "Z2" and "zone" "Z3" + // in the "region" "R1". + repeated Topology accessible_topology = 5; +} + +message TopologyRequirement { + // Specifies the list of topologies the provisioned volume MUST be + // accessible from. + // This field is OPTIONAL. If TopologyRequirement is specified either + // requisite or preferred or both MUST be specified. + // + // If requisite is specified, the provisioned volume MUST be + // accessible from at least one of the requisite topologies. + // + // Given + // x = number of topologies provisioned volume is accessible from + // n = number of requisite topologies + // The CO MUST ensure n >= 1. The SP MUST ensure x >= 1 + // If x==n, then the SP MUST make the provisioned volume available to + // all topologies from the list of requisite topologies. If it is + // unable to do so, the SP MUST fail the CreateVolume call. + // For example, if a volume should be accessible from a single zone, + // and requisite = + // {"region": "R1", "zone": "Z2"} + // then the provisioned volume MUST be accessible from the "region" + // "R1" and the "zone" "Z2". 
+ // Similarly, if a volume should be accessible from two zones, and
+ // requisite =
+ // {"region": "R1", "zone": "Z2"},
+ // {"region": "R1", "zone": "Z3"}
+ // then the provisioned volume MUST be accessible from the "region"
+ // "R1" and both "zone" "Z2" and "zone" "Z3".
+ //
+ // If x<n, then the SP SHALL choose x unique topologies from the list
+ // of requisite topologies. If it is unable to do so, the SP MUST
+ // fail the CreateVolume call.
+ // For example, if a volume should be accessible from a single zone,
+ // and requisite =
+ // {"region": "R1", "zone": "Z2"},
+ // {"region": "R1", "zone": "Z3"}
+ // then the SP may choose to make the provisioned volume available in
+ // either the "zone" "Z2" or the "zone" "Z3" in the "region" "R1".
+ //
+ // If x>n, then the SP MUST make the provisioned volume available from
+ // all topologies from the list of requisite topologies and MAY choose
+ // the remaining x-n unique topologies from the list of all possible
+ // topologies. If it is unable to do so, the SP MUST fail the
+ // CreateVolume call.
+ // For example, if a volume should be accessible from two zones, and
+ // requisite =
+ // {"region": "R1", "zone": "Z2"}
+ // then the provisioned volume MUST be accessible from the "region"
+ // "R1" and the "zone" "Z2" and the SP may select the second zone
+ // independently, e.g. "R1/Z4".
+ repeated Topology requisite = 1;
+
+ // Specifies the list of topologies the CO would prefer the volume to
+ // be provisioned in.
+ //
+ // This field is OPTIONAL. If TopologyRequirement is specified either
+ // requisite or preferred or both MUST be specified.
+ //
+ // An SP MUST attempt to make the provisioned volume available using
+ // the preferred topologies in order from first to last.
+ //
+ // If requisite is specified, all topologies in preferred list MUST
+ // also be present in the list of requisite topologies.
+ //
+ // If the SP is unable to make the provisioned volume available
+ // from any of the preferred topologies, the SP MAY choose a topology
+ // from the list of requisite topologies.
+ // If the list of requisite topologies is not specified, then the SP
+ // MAY choose from the list of all possible topologies.
+ // If the list of requisite topologies is specified and the SP is
+ // unable to make the provisioned volume available from any of the
+ // requisite topologies it MUST fail the CreateVolume call.
+ //
+ // Example 1:
+ // Given a volume should be accessible from a single zone, and
+ // requisite =
+ // {"region": "R1", "zone": "Z2"},
+ // {"region": "R1", "zone": "Z3"}
+ // preferred =
+ // {"region": "R1", "zone": "Z3"}
+ // then the SP SHOULD first attempt to make the provisioned volume
+ // available from "zone" "Z3" in the "region" "R1" and fall back to
+ // "zone" "Z2" in the "region" "R1" if that is not possible.
+ //
+ // Example 2:
+ // Given a volume should be accessible from a single zone, and
+ // requisite =
+ // {"region": "R1", "zone": "Z2"},
+ // {"region": "R1", "zone": "Z3"},
+ // {"region": "R1", "zone": "Z4"},
+ // {"region": "R1", "zone": "Z5"}
+ // preferred =
+ // {"region": "R1", "zone": "Z4"},
+ // {"region": "R1", "zone": "Z2"}
+ // then the SP SHOULD first attempt to make the provisioned volume
+ // accessible from "zone" "Z4" in the "region" "R1" and fall back to
+ // "zone" "Z2" in the "region" "R1" if that is not possible. If that
+ // is not possible, the SP may choose between either the "zone"
+ // "Z3" or "Z5" in the "region" "R1".
+ //
+ // Example 3:
+ // Given a volume should be accessible from TWO zones (because an
+ // opaque parameter in CreateVolumeRequest, for example, specifies
+ // the volume is accessible from two zones, aka synchronously
+ // replicated), and
+ // requisite =
+ // {"region": "R1", "zone": "Z2"},
+ // {"region": "R1", "zone": "Z3"},
+ // {"region": "R1", "zone": "Z4"},
+ // {"region": "R1", "zone": "Z5"}
+ // preferred =
+ // {"region": "R1", "zone": "Z5"},
+ // {"region": "R1", "zone": "Z3"}
+ // then the SP SHOULD first attempt to make the provisioned volume
+ // accessible from the combination of the two "zones" "Z5" and "Z3" in
+ // the "region" "R1". If that's not possible, it should fall back to
+ // a combination of "Z5" and other possibilities from the list of
+ // requisite.
If that's not possible, it should fall back to a + // combination of "Z3" and other possibilities from the list of + // requisite. If that's not possible, it should fall back to a + // combination of other possibilities from the list of requisite. + repeated Topology preferred = 2; +} + +// Topology is a map of topological domains to topological segments. +// A topological domain is a sub-division of a cluster, like "region", +// "zone", "rack", etc. +// A topological segment is a specific instance of a topological domain, +// like "zone3", "rack3", etc. +// For example {"com.company/zone": "Z1", "com.company/rack": "R3"} +// Valid keys have two segments: an OPTIONAL prefix and name, separated +// by a slash (/), for example: "com.company.example/zone". +// The key name segment is REQUIRED. The prefix is OPTIONAL. +// The key name MUST be 63 characters or less, begin and end with an +// alphanumeric character ([a-z0-9A-Z]), and contain only dashes (-), +// underscores (_), dots (.), or alphanumerics in between, for example +// "zone". +// The key prefix MUST be 63 characters or less, begin and end with a +// lower-case alphanumeric character ([a-z0-9]), contain only +// dashes (-), dots (.), or lower-case alphanumerics in between, and +// follow domain name notation format +// (https://tools.ietf.org/html/rfc1035#section-2.3.1). +// The key prefix SHOULD include the plugin's host company name and/or +// the plugin name, to minimize the possibility of collisions with keys +// from other plugins. +// If a key prefix is specified, it MUST be identical across all +// topology keys returned by the SP (across all RPCs). +// Keys MUST be case-insensitive. Meaning the keys "Zone" and "zone" +// MUST not both exist. +// Each value (topological segment) MUST contain 1 or more strings. +// Each string MUST be 63 characters or less and begin and end with an +// alphanumeric character with '-', '_', '.', or alphanumerics in +// between. 
+message Topology {
+ map<string, string> segments = 1;
+}
+message DeleteVolumeRequest {
+ // The ID of the volume to be deprovisioned.
+ // This field is REQUIRED.
+ string volume_id = 1;
+
+ // Secrets required by plugin to complete volume deletion request.
+ // This field is OPTIONAL. Refer to the `Secrets Requirements`
+ // section on how to use this field.
+ map<string, string> secrets = 2 [(csi_secret) = true];
+}
+
+message DeleteVolumeResponse {
+ // Intentionally empty.
+}
+message ControllerPublishVolumeRequest {
+ // The ID of the volume to be used on a node.
+ // This field is REQUIRED.
+ string volume_id = 1;
+
+ // The ID of the node. This field is REQUIRED. The CO SHALL set this
+ // field to match the node ID returned by `NodeGetInfo`.
+ string node_id = 2;
+
+ // Volume capability describing how the CO intends to use this volume.
+ // SP MUST ensure the CO can use the published volume as described.
+ // Otherwise SP MUST return the appropriate gRPC error code.
+ // This is a REQUIRED field.
+ VolumeCapability volume_capability = 3;
+
+ // Indicates SP MUST publish the volume in readonly mode.
+ // CO MUST set this field to false if SP does not have the
+ // PUBLISH_READONLY controller capability.
+ // This is a REQUIRED field.
+ bool readonly = 4;
+
+ // Secrets required by plugin to complete controller publish volume
+ // request. This field is OPTIONAL. Refer to the
+ // `Secrets Requirements` section on how to use this field.
+ map<string, string> secrets = 5 [(csi_secret) = true];
+
+ // Volume context as returned by CO in CreateVolumeRequest. This field
+ // is OPTIONAL and MUST match the volume_context of the volume
+ // identified by `volume_id`.
+ map<string, string> volume_context = 6;
+}
+
+message ControllerPublishVolumeResponse {
+ // Opaque static publish properties of the volume. SP MAY use this
+ // field to ensure subsequent `NodeStageVolume` or `NodePublishVolume`
+ // calls have contextual information.
+ // The contents of this field SHALL be opaque to a CO.
+ // The contents of this field SHALL NOT be mutable.
+ // The contents of this field SHALL be safe for the CO to cache.
+ // The contents of this field SHOULD NOT contain sensitive
+ // information.
+ // The contents of this field SHOULD NOT be used for uniquely
+ // identifying a volume. The `volume_id` alone SHOULD be sufficient to
+ // identify the volume.
+ // This field is OPTIONAL and when present MUST be passed to
+ // subsequent `NodeStageVolume` or `NodePublishVolume` calls
+ map<string, string> publish_context = 1;
+}
+message ControllerUnpublishVolumeRequest {
+ // The ID of the volume. This field is REQUIRED.
+ string volume_id = 1;
+
+ // The ID of the node. This field is OPTIONAL. The CO SHOULD set this
+ // field to match the node ID returned by `NodeGetInfo` or leave it
+ // unset. If the value is set, the SP MUST unpublish the volume from
+ // the specified node. If the value is unset, the SP MUST unpublish
+ // the volume from all nodes it is published to.
+ string node_id = 2;
+
+ // Secrets required by plugin to complete controller unpublish volume
+ // request. This SHOULD be the same secrets passed to the
+ // ControllerPublishVolume call for the specified volume.
+ // This field is OPTIONAL. Refer to the `Secrets Requirements`
+ // section on how to use this field.
+ map<string, string> secrets = 3 [(csi_secret) = true];
+}
+
+message ControllerUnpublishVolumeResponse {
+ // Intentionally empty.
+}
+message ValidateVolumeCapabilitiesRequest {
+ // The ID of the volume to check. This field is REQUIRED.
+ string volume_id = 1;
+
+ // Volume context as returned by CO in CreateVolumeRequest. This field
+ // is OPTIONAL and MUST match the volume_context of the volume
+ // identified by `volume_id`.
+ map<string, string> volume_context = 2;
+
+ // The capabilities that the CO wants to check for the volume. This
+ // call SHALL return "confirmed" only if all the volume capabilities
+ // specified below are supported. This field is REQUIRED.
+ repeated VolumeCapability volume_capabilities = 3;
+
+ // See CreateVolumeRequest.parameters.
+ // This field is OPTIONAL.
+ map<string, string> parameters = 4;
+
+ // Secrets required by plugin to complete volume validation request.
+ // This field is OPTIONAL. Refer to the `Secrets Requirements`
+ // section on how to use this field.
+ map<string, string> secrets = 5 [(csi_secret) = true];
+}
+
+message ValidateVolumeCapabilitiesResponse {
+ message Confirmed {
+ // Volume context validated by the plugin.
+ // This field is OPTIONAL.
+ map<string, string> volume_context = 1;
+
+ // Volume capabilities supported by the plugin.
+ // This field is REQUIRED.
+ repeated VolumeCapability volume_capabilities = 2;
+
+ // The volume creation parameters validated by the plugin.
+ // This field is OPTIONAL.
+ map<string, string> parameters = 3;
+ }
+
+ // Confirmed indicates to the CO the set of capabilities that the
+ // plugin has validated. This field SHALL only be set to a non-empty
+ // value for successful validation responses.
+ // For successful validation responses, the CO SHALL compare the
+ // fields of this message to the originally requested capabilities in
+ // order to guard against an older plugin reporting "valid" for newer
+ // capability fields that it does not yet understand.
+ // This field is OPTIONAL.
+ Confirmed confirmed = 1;
+
+ // Message to the CO if `confirmed` above is empty. This field is
+ // OPTIONAL.
+ // An empty string is equal to an unspecified field value.
+ string message = 2;
+}
+message ListVolumesRequest {
+ // If specified (non-zero value), the Plugin MUST NOT return more
+ // entries than this number in the response. If the actual number of
+ // entries is more than this number, the Plugin MUST set `next_token`
+ // in the response which can be used to get the next page of entries
+ // in the subsequent `ListVolumes` call. This field is OPTIONAL. If
+ // not specified (zero value), it means there is no restriction on the
+ // number of entries that can be returned.
+ // The value of this field MUST NOT be negative.
+ int32 max_entries = 1;
+
+ // A token to specify where to start paginating. Set this field to
+ // `next_token` returned by a previous `ListVolumes` call to get the
+ // next page of entries. This field is OPTIONAL.
+ // An empty string is equal to an unspecified field value.
+ string starting_token = 2;
+}
+
+message ListVolumesResponse {
+ message Entry {
+ Volume volume = 1;
+ }
+
+ repeated Entry entries = 1;
+
+ // This token allows you to get the next page of entries for
+ // `ListVolumes` request. If the number of entries is larger than
+ // `max_entries`, use the `next_token` as a value for the
+ // `starting_token` field in the next `ListVolumes` request. This
+ // field is OPTIONAL.
+ // An empty string is equal to an unspecified field value.
+ string next_token = 2;
+}
+message GetCapacityRequest {
+ // If specified, the Plugin SHALL report the capacity of the storage
+ // that can be used to provision volumes that satisfy ALL of the
+ // specified `volume_capabilities`. These are the same
+ // `volume_capabilities` the CO will use in `CreateVolumeRequest`.
+ // This field is OPTIONAL.
+ repeated VolumeCapability volume_capabilities = 1;
+
+ // If specified, the Plugin SHALL report the capacity of the storage
+ // that can be used to provision volumes with the given Plugin
+ // specific `parameters`. These are the same `parameters` the CO will
+ // use in `CreateVolumeRequest`. This field is OPTIONAL.
+ map<string, string> parameters = 2;
+
+ // If specified, the Plugin SHALL report the capacity of the storage
+ // that can be used to provision volumes that in the specified
+ // `accessible_topology`. This is the same as the
+ // `accessible_topology` the CO returns in a `CreateVolumeResponse`.
+ // This field is OPTIONAL. This field SHALL NOT be set unless the
+ // plugin advertises the VOLUME_ACCESSIBILITY_CONSTRAINTS capability.
+ Topology accessible_topology = 3; +} + +message GetCapacityResponse { + // The available capacity, in bytes, of the storage that can be used + // to provision volumes. If `volume_capabilities` or `parameters` is + // specified in the request, the Plugin SHALL take those into + // consideration when calculating the available capacity of the + // storage. This field is REQUIRED. + // The value of this field MUST NOT be negative. + int64 available_capacity = 1; +} +message ControllerGetCapabilitiesRequest { + // Intentionally empty. +} + +message ControllerGetCapabilitiesResponse { + // All the capabilities that the controller service supports. This + // field is OPTIONAL. + repeated ControllerServiceCapability capabilities = 1; +} + +// Specifies a capability of the controller service. +message ControllerServiceCapability { + message RPC { + enum Type { + UNKNOWN = 0; + CREATE_DELETE_VOLUME = 1; + PUBLISH_UNPUBLISH_VOLUME = 2; + LIST_VOLUMES = 3; + GET_CAPACITY = 4; + // Currently the only way to consume a snapshot is to create + // a volume from it. Therefore plugins supporting + // CREATE_DELETE_SNAPSHOT MUST support creating volume from + // snapshot. + CREATE_DELETE_SNAPSHOT = 5; + LIST_SNAPSHOTS = 6; + + // Plugins supporting volume cloning at the storage level MAY + // report this capability. The source volume MUST be managed by + // the same plugin. Not all volume sources and parameters + // combinations MAY work. + CLONE_VOLUME = 7; + + // Indicates the SP supports ControllerPublishVolume.readonly + // field. + PUBLISH_READONLY = 8; + + // See VolumeExpansion for details. + EXPAND_VOLUME = 9; + } + + Type type = 1; + } + + oneof type { + // RPC that the controller supports. + RPC rpc = 1; + } +} +message CreateSnapshotRequest { + // The ID of the source volume to be snapshotted. + // This field is REQUIRED. + string source_volume_id = 1; + + // The suggested name for the snapshot. This field is REQUIRED for + // idempotency. 
+ // Any Unicode string that conforms to the length limit is allowed + // except those containing the following banned characters: + // U+0000-U+0008, U+000B, U+000C, U+000E-U+001F, U+007F-U+009F. + // (These are control characters other than commonly used whitespace.) + string name = 2; + + // Secrets required by plugin to complete snapshot creation request. + // This field is OPTIONAL. Refer to the `Secrets Requirements` + // section on how to use this field. + map secrets = 3 [(csi_secret) = true]; + + // Plugin specific parameters passed in as opaque key-value pairs. + // This field is OPTIONAL. The Plugin is responsible for parsing and + // validating these parameters. COs will treat these as opaque. + // Use cases for opaque parameters: + // - Specify a policy to automatically clean up the snapshot. + // - Specify an expiration date for the snapshot. + // - Specify whether the snapshot is readonly or read/write. + // - Specify if the snapshot should be replicated to some place. + // - Specify primary or secondary for replication systems that + // support snapshotting only on primary. + map parameters = 4; +} + +message CreateSnapshotResponse { + // Contains all attributes of the newly created snapshot that are + // relevant to the CO along with information required by the Plugin + // to uniquely identify the snapshot. This field is REQUIRED. + Snapshot snapshot = 1; +} + +// Information about a specific snapshot. +message Snapshot { + // This is the complete size of the snapshot in bytes. The purpose of + // this field is to give CO guidance on how much space is needed to + // create a volume from this snapshot. The size of the volume MUST NOT + // be less than the size of the source snapshot. This field is + // OPTIONAL. If this field is not set, it indicates that this size is + // unknown. The value of this field MUST NOT be negative and a size of + // zero means it is unspecified. 
+ int64 size_bytes = 1; + + // The identifier for this snapshot, generated by the plugin. + // This field is REQUIRED. + // This field MUST contain enough information to uniquely identify + // this specific snapshot vs all other snapshots supported by this + // plugin. + // This field SHALL be used by the CO in subsequent calls to refer to + // this snapshot. + // The SP is NOT responsible for global uniqueness of snapshot_id + // across multiple SPs. + string snapshot_id = 2; + + // Identity information for the source volume. Note that creating a + // snapshot from a snapshot is not supported here so the source has to + // be a volume. This field is REQUIRED. + string source_volume_id = 3; + + // Timestamp when the point-in-time snapshot is taken on the storage + // system. This field is REQUIRED. + .google.protobuf.Timestamp creation_time = 4; + + // Indicates if a snapshot is ready to use as a + // `volume_content_source` in a `CreateVolumeRequest`. The default + // value is false. This field is REQUIRED. + bool ready_to_use = 5; +} +message DeleteSnapshotRequest { + // The ID of the snapshot to be deleted. + // This field is REQUIRED. + string snapshot_id = 1; + + // Secrets required by plugin to complete snapshot deletion request. + // This field is OPTIONAL. Refer to the `Secrets Requirements` + // section on how to use this field. + map secrets = 2 [(csi_secret) = true]; +} + +message DeleteSnapshotResponse {} +// List all snapshots on the storage system regardless of how they were +// created. +message ListSnapshotsRequest { + // If specified (non-zero value), the Plugin MUST NOT return more + // entries than this number in the response. If the actual number of + // entries is more than this number, the Plugin MUST set `next_token` + // in the response which can be used to get the next page of entries + // in the subsequent `ListSnapshots` call. This field is OPTIONAL. 
If + // not specified (zero value), it means there is no restriction on the + // number of entries that can be returned. + // The value of this field MUST NOT be negative. + int32 max_entries = 1; + + // A token to specify where to start paginating. Set this field to + // `next_token` returned by a previous `ListSnapshots` call to get the + // next page of entries. This field is OPTIONAL. + // An empty string is equal to an unspecified field value. + string starting_token = 2; + + // Identity information for the source volume. This field is OPTIONAL. + // It can be used to list snapshots by volume. + string source_volume_id = 3; + + // Identity information for a specific snapshot. This field is + // OPTIONAL. It can be used to list only a specific snapshot. + // ListSnapshots will return with current snapshot information + // and will not block if the snapshot is being processed after + // it is cut. + string snapshot_id = 4; +} + +message ListSnapshotsResponse { + message Entry { + Snapshot snapshot = 1; + } + + repeated Entry entries = 1; + + // This token allows you to get the next page of entries for + // `ListSnapshots` request. If the number of entries is larger than + // `max_entries`, use the `next_token` as a value for the + // `starting_token` field in the next `ListSnapshots` request. This + // field is OPTIONAL. + // An empty string is equal to an unspecified field value. + string next_token = 2; +} +message ControllerExpandVolumeRequest { + // The ID of the volume to expand. This field is REQUIRED. + string volume_id = 1; + + // This allows CO to specify the capacity requirements of the volume + // after expansion. This field is REQUIRED. + CapacityRange capacity_range = 2; + + // Secrets required by the plugin for expanding the volume. + // This field is OPTIONAL. + map<string, string> secrets = 3 [(csi_secret) = true]; +} + +message ControllerExpandVolumeResponse { + // Capacity of volume after expansion. This field is REQUIRED. 
+ int64 capacity_bytes = 1; + + // Whether node expansion is required for the volume. When true + // the CO MUST make NodeExpandVolume RPC call on the node. This field + // is REQUIRED. + bool node_expansion_required = 2; +} +message NodeStageVolumeRequest { + // The ID of the volume to publish. This field is REQUIRED. + string volume_id = 1; + + // The CO SHALL set this field to the value returned by + // `ControllerPublishVolume` if the corresponding Controller Plugin + // has `PUBLISH_UNPUBLISH_VOLUME` controller capability, and SHALL be + // left unset if the corresponding Controller Plugin does not have + // this capability. This is an OPTIONAL field. + map<string, string> publish_context = 2; + + // The path to which the volume MAY be staged. It MUST be an + // absolute path in the root filesystem of the process serving this + // request, and MUST be a directory. The CO SHALL ensure that there + // is only one `staging_target_path` per volume. The CO SHALL ensure + // that the path is directory and that the process serving the + // request has `read` and `write` permission to that directory. The + // CO SHALL be responsible for creating the directory if it does not + // exist. + // This is a REQUIRED field. + string staging_target_path = 3; + + // Volume capability describing how the CO intends to use this volume. + // SP MUST ensure the CO can use the staged volume as described. + // Otherwise SP MUST return the appropriate gRPC error code. + // This is a REQUIRED field. + VolumeCapability volume_capability = 4; + + // Secrets required by plugin to complete node stage volume request. + // This field is OPTIONAL. Refer to the `Secrets Requirements` + // section on how to use this field. + map<string, string> secrets = 5 [(csi_secret) = true]; + + // Volume context as returned by CO in CreateVolumeRequest. This field + // is OPTIONAL and MUST match the volume_context of the volume + // identified by `volume_id`. 
+ map<string, string> volume_context = 6; +} + +message NodeStageVolumeResponse { + // Intentionally empty. +} +message NodeUnstageVolumeRequest { + // The ID of the volume. This field is REQUIRED. + string volume_id = 1; + + // The path at which the volume was staged. It MUST be an absolute + // path in the root filesystem of the process serving this request. + // This is a REQUIRED field. + string staging_target_path = 2; +} + +message NodeUnstageVolumeResponse { + // Intentionally empty. +} +message NodePublishVolumeRequest { + // The ID of the volume to publish. This field is REQUIRED. + string volume_id = 1; + + // The CO SHALL set this field to the value returned by + // `ControllerPublishVolume` if the corresponding Controller Plugin + // has `PUBLISH_UNPUBLISH_VOLUME` controller capability, and SHALL be + // left unset if the corresponding Controller Plugin does not have + // this capability. This is an OPTIONAL field. + map<string, string> publish_context = 2; + + // The path to which the volume was staged by `NodeStageVolume`. + // It MUST be an absolute path in the root filesystem of the process + // serving this request. + // It MUST be set if the Node Plugin implements the + // `STAGE_UNSTAGE_VOLUME` node capability. + // This is an OPTIONAL field. + string staging_target_path = 3; + + // The path to which the volume will be published. It MUST be an + // absolute path in the root filesystem of the process serving this + // request. The CO SHALL ensure uniqueness of target_path per volume. + // The CO SHALL ensure that the parent directory of this path exists + // and that the process serving the request has `read` and `write` + // permissions to that parent directory. + // For volumes with an access type of block, the SP SHALL place the + // block device at target_path. + // For volumes with an access type of mount, the SP SHALL place the + // mounted directory at target_path. + // Creation of target_path is the responsibility of the SP. + // This is a REQUIRED field. 
+ string target_path = 4; + + // Volume capability describing how the CO intends to use this volume. + // SP MUST ensure the CO can use the published volume as described. + // Otherwise SP MUST return the appropriate gRPC error code. + // This is a REQUIRED field. + VolumeCapability volume_capability = 5; + + // Indicates SP MUST publish the volume in readonly mode. + // This field is REQUIRED. + bool readonly = 6; + + // Secrets required by plugin to complete node publish volume request. + // This field is OPTIONAL. Refer to the `Secrets Requirements` + // section on how to use this field. + map<string, string> secrets = 7 [(csi_secret) = true]; + + // Volume context as returned by CO in CreateVolumeRequest. This field + // is OPTIONAL and MUST match the volume_context of the volume + // identified by `volume_id`. + map<string, string> volume_context = 8; +} + +message NodePublishVolumeResponse { + // Intentionally empty. +} +message NodeUnpublishVolumeRequest { + // The ID of the volume. This field is REQUIRED. + string volume_id = 1; + + // The path at which the volume was published. It MUST be an absolute + // path in the root filesystem of the process serving this request. + // The SP MUST delete the file or directory it created at this path. + // This is a REQUIRED field. + string target_path = 2; +} + +message NodeUnpublishVolumeResponse { + // Intentionally empty. +} +message NodeGetVolumeStatsRequest { + // The ID of the volume. This field is REQUIRED. + string volume_id = 1; + + // It can be any valid path where volume was previously + // staged or published. + // It MUST be an absolute path in the root filesystem of + // the process serving this request. + // This is a REQUIRED field. + string volume_path = 2; +} + +message NodeGetVolumeStatsResponse { + // This field is OPTIONAL. + repeated VolumeUsage usage = 1; +} + +message VolumeUsage { + enum Unit { + UNKNOWN = 0; + BYTES = 1; + INODES = 2; + } + // The available capacity in specified Unit. This field is OPTIONAL. 
+ // The value of this field MUST NOT be negative. + int64 available = 1; + + // The total capacity in specified Unit. This field is REQUIRED. + // The value of this field MUST NOT be negative. + int64 total = 2; + + // The used capacity in specified Unit. This field is OPTIONAL. + // The value of this field MUST NOT be negative. + int64 used = 3; + + // Units by which values are measured. This field is REQUIRED. + Unit unit = 4; +} +message NodeGetCapabilitiesRequest { + // Intentionally empty. +} + +message NodeGetCapabilitiesResponse { + // All the capabilities that the node service supports. This field + // is OPTIONAL. + repeated NodeServiceCapability capabilities = 1; +} + +// Specifies a capability of the node service. +message NodeServiceCapability { + message RPC { + enum Type { + UNKNOWN = 0; + STAGE_UNSTAGE_VOLUME = 1; + // If Plugin implements GET_VOLUME_STATS capability + // then it MUST implement NodeGetVolumeStats RPC + // call for fetching volume statistics. + GET_VOLUME_STATS = 2; + // See VolumeExpansion for details. + EXPAND_VOLUME = 3; + } + + Type type = 1; + } + + oneof type { + // RPC that the controller supports. + RPC rpc = 1; + } +} +message NodeGetInfoRequest { +} + +message NodeGetInfoResponse { + // The identifier of the node as understood by the SP. + // This field is REQUIRED. + // This field MUST contain enough information to uniquely identify + // this specific node vs all other nodes supported by this plugin. + // This field SHALL be used by the CO in subsequent calls, including + // `ControllerPublishVolume`, to refer to this node. + // The SP is NOT responsible for global uniqueness of node_id across + // multiple SPs. + string node_id = 1; + + // Maximum number of volumes that controller can publish to the node. + // If value is not set or zero CO SHALL decide how many volumes of + // this type can be published by the controller to the node. The + // plugin MUST NOT set negative values here. + // This field is OPTIONAL. 
+ int64 max_volumes_per_node = 2; + + // Specifies where (regions, zones, racks, etc.) the node is + // accessible from. + // A plugin that returns this field MUST also set the + // VOLUME_ACCESSIBILITY_CONSTRAINTS plugin capability. + // COs MAY use this information along with the topology information + // returned in CreateVolumeResponse to ensure that a given volume is + // accessible from a given node when scheduling workloads. + // This field is OPTIONAL. If it is not specified, the CO MAY assume + // the node is not subject to any topological constraint, and MAY + // schedule workloads that reference any volume V, such that there are + // no topological constraints declared for V. + // + // Example 1: + // accessible_topology = + // {"region": "R1", "zone": "Z2"} + // Indicates the node exists within the "region" "R1" and the "zone" + // "Z2". + Topology accessible_topology = 3; +} +message NodeExpandVolumeRequest { + // The ID of the volume. This field is REQUIRED. + string volume_id = 1; + + // The path on which volume is available. This field is REQUIRED. + string volume_path = 2; + + // This allows CO to specify the capacity requirements of the volume + // after expansion. If capacity_range is omitted then a plugin MAY + // inspect the file system of the volume to determine the maximum + // capacity to which the volume can be expanded. In such cases a + // plugin MAY expand the volume to its maximum capacity. + // This field is OPTIONAL. + CapacityRange capacity_range = 3; +} + +message NodeExpandVolumeResponse { + // The capacity of the volume in bytes. This field is OPTIONAL. + int64 capacity_bytes = 1; +} diff --git a/csi_proto/csi-v1.2.0.proto b/csi_proto/csi-v1.2.0.proto new file mode 100644 index 0000000..ae377b5 --- /dev/null +++ b/csi_proto/csi-v1.2.0.proto @@ -0,0 +1,1369 @@ +// Code generated by make; DO NOT EDIT. 
+syntax = "proto3"; +package csi.v1; + +import "google/protobuf/descriptor.proto"; +import "google/protobuf/timestamp.proto"; +import "google/protobuf/wrappers.proto"; + +option go_package = "csi"; + +extend google.protobuf.FieldOptions { + // Indicates that a field MAY contain information that is sensitive + // and MUST be treated as such (e.g. not logged). + bool csi_secret = 1059; +} +service Identity { + rpc GetPluginInfo(GetPluginInfoRequest) + returns (GetPluginInfoResponse) {} + + rpc GetPluginCapabilities(GetPluginCapabilitiesRequest) + returns (GetPluginCapabilitiesResponse) {} + + rpc Probe (ProbeRequest) + returns (ProbeResponse) {} +} + +service Controller { + rpc CreateVolume (CreateVolumeRequest) + returns (CreateVolumeResponse) {} + + rpc DeleteVolume (DeleteVolumeRequest) + returns (DeleteVolumeResponse) {} + + rpc ControllerPublishVolume (ControllerPublishVolumeRequest) + returns (ControllerPublishVolumeResponse) {} + + rpc ControllerUnpublishVolume (ControllerUnpublishVolumeRequest) + returns (ControllerUnpublishVolumeResponse) {} + + rpc ValidateVolumeCapabilities (ValidateVolumeCapabilitiesRequest) + returns (ValidateVolumeCapabilitiesResponse) {} + + rpc ListVolumes (ListVolumesRequest) + returns (ListVolumesResponse) {} + + rpc GetCapacity (GetCapacityRequest) + returns (GetCapacityResponse) {} + + rpc ControllerGetCapabilities (ControllerGetCapabilitiesRequest) + returns (ControllerGetCapabilitiesResponse) {} + + rpc CreateSnapshot (CreateSnapshotRequest) + returns (CreateSnapshotResponse) {} + + rpc DeleteSnapshot (DeleteSnapshotRequest) + returns (DeleteSnapshotResponse) {} + + rpc ListSnapshots (ListSnapshotsRequest) + returns (ListSnapshotsResponse) {} + + rpc ControllerExpandVolume (ControllerExpandVolumeRequest) + returns (ControllerExpandVolumeResponse) {} +} + +service Node { + rpc NodeStageVolume (NodeStageVolumeRequest) + returns (NodeStageVolumeResponse) {} + + rpc NodeUnstageVolume (NodeUnstageVolumeRequest) + returns 
(NodeUnstageVolumeResponse) {} + + rpc NodePublishVolume (NodePublishVolumeRequest) + returns (NodePublishVolumeResponse) {} + + rpc NodeUnpublishVolume (NodeUnpublishVolumeRequest) + returns (NodeUnpublishVolumeResponse) {} + + rpc NodeGetVolumeStats (NodeGetVolumeStatsRequest) + returns (NodeGetVolumeStatsResponse) {} + + + rpc NodeExpandVolume(NodeExpandVolumeRequest) + returns (NodeExpandVolumeResponse) {} + + + rpc NodeGetCapabilities (NodeGetCapabilitiesRequest) + returns (NodeGetCapabilitiesResponse) {} + + rpc NodeGetInfo (NodeGetInfoRequest) + returns (NodeGetInfoResponse) {} +} +message GetPluginInfoRequest { + // Intentionally empty. +} + +message GetPluginInfoResponse { + // The name MUST follow domain name notation format + // (https://tools.ietf.org/html/rfc1035#section-2.3.1). It SHOULD + // include the plugin's host company name and the plugin name, + // to minimize the possibility of collisions. It MUST be 63 + // characters or less, beginning and ending with an alphanumeric + // character ([a-z0-9A-Z]) with dashes (-), dots (.), and + // alphanumerics between. This field is REQUIRED. + string name = 1; + + // This field is REQUIRED. Value of this field is opaque to the CO. + string vendor_version = 2; + + // This field is OPTIONAL. Values are opaque to the CO. + map<string, string> manifest = 3; +} +message GetPluginCapabilitiesRequest { + // Intentionally empty. +} + +message GetPluginCapabilitiesResponse { + // All the capabilities that the controller service supports. This + // field is OPTIONAL. + repeated PluginCapability capabilities = 1; +} + +// Specifies a capability of the plugin. +message PluginCapability { + message Service { + enum Type { + UNKNOWN = 0; + // CONTROLLER_SERVICE indicates that the Plugin provides RPCs for + // the ControllerService. Plugins SHOULD provide this capability. + // In rare cases certain plugins MAY wish to omit the + // ControllerService entirely from their implementation, but such + // SHOULD NOT be the common case. 
+ // The presence of this capability determines whether the CO will + // attempt to invoke the REQUIRED ControllerService RPCs, as well + // as specific RPCs as indicated by ControllerGetCapabilities. + CONTROLLER_SERVICE = 1; + + // VOLUME_ACCESSIBILITY_CONSTRAINTS indicates that the volumes for + // this plugin MAY NOT be equally accessible by all nodes in the + // cluster. The CO MUST use the topology information returned by + // CreateVolumeRequest along with the topology information + // returned by NodeGetInfo to ensure that a given volume is + // accessible from a given node when scheduling workloads. + VOLUME_ACCESSIBILITY_CONSTRAINTS = 2; + } + Type type = 1; + } + + message VolumeExpansion { + enum Type { + UNKNOWN = 0; + + // ONLINE indicates that volumes may be expanded when published to + // a node. When a Plugin implements this capability it MUST + // implement either the EXPAND_VOLUME controller capability or the + // EXPAND_VOLUME node capability or both. When a plugin supports + // ONLINE volume expansion and also has the EXPAND_VOLUME + // controller capability then the plugin MUST support expansion of + // volumes currently published and available on a node. When a + // plugin supports ONLINE volume expansion and also has the + // EXPAND_VOLUME node capability then the plugin MAY support + // expansion of node-published volume via NodeExpandVolume. + // + // Example 1: Given a shared filesystem volume (e.g. GlusterFs), + // the Plugin may set the ONLINE volume expansion capability and + // implement ControllerExpandVolume but not NodeExpandVolume. + // + // Example 2: Given a block storage volume type (e.g. EBS), the + // Plugin may set the ONLINE volume expansion capability and + // implement both ControllerExpandVolume and NodeExpandVolume. 
+ // + // Example 3: Given a Plugin that supports volume expansion only + // upon a node, the Plugin may set the ONLINE volume + // expansion capability and implement NodeExpandVolume but not + // ControllerExpandVolume. + ONLINE = 1; + + // OFFLINE indicates that volumes currently published and + // available on a node SHALL NOT be expanded via + // ControllerExpandVolume. When a plugin supports OFFLINE volume + // expansion it MUST implement either the EXPAND_VOLUME controller + // capability or both the EXPAND_VOLUME controller capability and + // the EXPAND_VOLUME node capability. + // + // Example 1: Given a block storage volume type (e.g. Azure Disk) + // that does not support expansion of "node-attached" (i.e. + // controller-published) volumes, the Plugin may indicate + // OFFLINE volume expansion support and implement both + // ControllerExpandVolume and NodeExpandVolume. + OFFLINE = 2; + } + Type type = 1; + } + + oneof type { + // Service that the plugin supports. + Service service = 1; + VolumeExpansion volume_expansion = 2; + } +} +message ProbeRequest { + // Intentionally empty. +} + +message ProbeResponse { + // Readiness allows a plugin to report its initialization status back + // to the CO. Initialization for some plugins MAY be time consuming + // and it is important for a CO to distinguish between the following + // cases: + // + // 1) The plugin is in an unhealthy state and MAY need restarting. In + // this case a gRPC error code SHALL be returned. + // 2) The plugin is still initializing, but is otherwise perfectly + // healthy. In this case a successful response SHALL be returned + // with a readiness value of `false`. Calls to the plugin's + // Controller and/or Node services MAY fail due to an incomplete + // initialization state. + // 3) The plugin has finished initializing and is ready to service + // calls to its Controller and/or Node services. A successful + // response is returned with a readiness value of `true`. 
+ // + // This field is OPTIONAL. If not present, the caller SHALL assume + // that the plugin is in a ready state and is accepting calls to its + // Controller and/or Node services (according to the plugin's reported + // capabilities). + .google.protobuf.BoolValue ready = 1; +} +message CreateVolumeRequest { + // The suggested name for the storage space. This field is REQUIRED. + // It serves two purposes: + // 1) Idempotency - This name is generated by the CO to achieve + // idempotency. The Plugin SHOULD ensure that multiple + // `CreateVolume` calls for the same name do not result in more + // than one piece of storage provisioned corresponding to that + // name. If a Plugin is unable to enforce idempotency, the CO's + // error recovery logic could result in multiple (unused) volumes + // being provisioned. + // In the case of error, the CO MUST handle the gRPC error codes + // per the recovery behavior defined in the "CreateVolume Errors" + // section below. + // The CO is responsible for cleaning up volumes it provisioned + // that it no longer needs. If the CO is uncertain whether a volume + // was provisioned or not when a `CreateVolume` call fails, the CO + // MAY call `CreateVolume` again, with the same name, to ensure the + // volume exists and to retrieve the volume's `volume_id` (unless + // otherwise prohibited by "CreateVolume Errors"). + // 2) Suggested name - Some storage systems allow callers to specify + // an identifier by which to refer to the newly provisioned + // storage. If a storage system supports this, it can optionally + // use this name as the identifier for the new volume. + // Any Unicode string that conforms to the length limit is allowed + // except those containing the following banned characters: + // U+0000-U+0008, U+000B, U+000C, U+000E-U+001F, U+007F-U+009F. + // (These are control characters other than commonly used whitespace.) + string name = 1; + + // This field is OPTIONAL. 
This allows the CO to specify the capacity + // requirement of the volume to be provisioned. If not specified, the + // Plugin MAY choose an implementation-defined capacity range. If + // specified it MUST always be honored, even when creating volumes + // from a source; which MAY force some backends to internally extend + // the volume after creating it. + CapacityRange capacity_range = 2; + + // The capabilities that the provisioned volume MUST have. SP MUST + // provision a volume that will satisfy ALL of the capabilities + // specified in this list. Otherwise SP MUST return the appropriate + // gRPC error code. + // The Plugin MUST assume that the CO MAY use the provisioned volume + // with ANY of the capabilities specified in this list. + // For example, a CO MAY specify two volume capabilities: one with + // access mode SINGLE_NODE_WRITER and another with access mode + // MULTI_NODE_READER_ONLY. In this case, the SP MUST verify that the + // provisioned volume can be used in either mode. + // This also enables the CO to do early validation: If ANY of the + // specified volume capabilities are not supported by the SP, the call + // MUST return the appropriate gRPC error code. + // This field is REQUIRED. + repeated VolumeCapability volume_capabilities = 3; + + // Plugin specific parameters passed in as opaque key-value pairs. + // This field is OPTIONAL. The Plugin is responsible for parsing and + // validating these parameters. COs will treat these as opaque. + map<string, string> parameters = 4; + + // Secrets required by plugin to complete volume creation request. + // This field is OPTIONAL. Refer to the `Secrets Requirements` + // section on how to use this field. + map<string, string> secrets = 5 [(csi_secret) = true]; + + // If specified, the new volume will be pre-populated with data from + // this source. This field is OPTIONAL. + VolumeContentSource volume_content_source = 6; + + // Specifies where (regions, zones, racks, etc.) the provisioned + // volume MUST be accessible from. 
+ // An SP SHALL advertise the requirements for topological + // accessibility information in documentation. COs SHALL only specify + // topological accessibility information supported by the SP. + // This field is OPTIONAL. + // This field SHALL NOT be specified unless the SP has the + // VOLUME_ACCESSIBILITY_CONSTRAINTS plugin capability. + // If this field is not specified and the SP has the + // VOLUME_ACCESSIBILITY_CONSTRAINTS plugin capability, the SP MAY + // choose where the provisioned volume is accessible from. + TopologyRequirement accessibility_requirements = 7; +} + +// Specifies what source the volume will be created from. One of the +// type fields MUST be specified. +message VolumeContentSource { + message SnapshotSource { + // Contains identity information for the existing source snapshot. + // This field is REQUIRED. Plugin is REQUIRED to support creating + // volume from snapshot if it supports the capability + // CREATE_DELETE_SNAPSHOT. + string snapshot_id = 1; + } + + message VolumeSource { + // Contains identity information for the existing source volume. + // This field is REQUIRED. Plugins reporting CLONE_VOLUME + // capability MUST support creating a volume from another volume. + string volume_id = 1; + } + + oneof type { + SnapshotSource snapshot = 1; + VolumeSource volume = 2; + } +} + +message CreateVolumeResponse { + // Contains all attributes of the newly created volume that are + // relevant to the CO along with information required by the Plugin + // to uniquely identify the volume. This field is REQUIRED. + Volume volume = 1; +} + +// Specify a capability of a volume. +message VolumeCapability { + // Indicate that the volume will be accessed via the block device API. + message BlockVolume { + // Intentionally empty, for now. + } + + // Indicate that the volume will be accessed via the filesystem API. + message MountVolume { + // The filesystem type. This field is OPTIONAL. 
+ // An empty string is equal to an unspecified field value. + string fs_type = 1; + + // The mount options that can be used for the volume. This field is + // OPTIONAL. `mount_flags` MAY contain sensitive information. + // Therefore, the CO and the Plugin MUST NOT leak this information + // to untrusted entities. The total size of this repeated field + // SHALL NOT exceed 4 KiB. + repeated string mount_flags = 2; + } + + // Specify how a volume can be accessed. + message AccessMode { + enum Mode { + UNKNOWN = 0; + + // Can only be published once as read/write on a single node, at + // any given time. + SINGLE_NODE_WRITER = 1; + + // Can only be published once as readonly on a single node, at + // any given time. + SINGLE_NODE_READER_ONLY = 2; + + // Can be published as readonly at multiple nodes simultaneously. + MULTI_NODE_READER_ONLY = 3; + + // Can be published at multiple nodes simultaneously. Only one of + // the node can be used as read/write. The rest will be readonly. + MULTI_NODE_SINGLE_WRITER = 4; + + // Can be published as read/write at multiple nodes + // simultaneously. + MULTI_NODE_MULTI_WRITER = 5; + } + + // This field is REQUIRED. + Mode mode = 1; + } + + // Specifies what API the volume will be accessed using. One of the + // following fields MUST be specified. + oneof access_type { + BlockVolume block = 1; + MountVolume mount = 2; + } + + // This is a REQUIRED field. + AccessMode access_mode = 3; +} + +// The capacity of the storage space in bytes. To specify an exact size, +// `required_bytes` and `limit_bytes` SHALL be set to the same value. At +// least one of the these fields MUST be specified. +message CapacityRange { + // Volume MUST be at least this big. This field is OPTIONAL. + // A value of 0 is equal to an unspecified field value. + // The value of this field MUST NOT be negative. + int64 required_bytes = 1; + + // Volume MUST not be bigger than this. This field is OPTIONAL. + // A value of 0 is equal to an unspecified field value. 
+ // The value of this field MUST NOT be negative. + int64 limit_bytes = 2; +} + +// Information about a specific volume. +message Volume { + // The capacity of the volume in bytes. This field is OPTIONAL. If not + // set (value of 0), it indicates that the capacity of the volume is + // unknown (e.g., NFS share). + // The value of this field MUST NOT be negative. + int64 capacity_bytes = 1; + + // The identifier for this volume, generated by the plugin. + // This field is REQUIRED. + // This field MUST contain enough information to uniquely identify + // this specific volume vs all other volumes supported by this plugin. + // This field SHALL be used by the CO in subsequent calls to refer to + // this volume. + // The SP is NOT responsible for global uniqueness of volume_id across + // multiple SPs. + string volume_id = 2; + + // Opaque static properties of the volume. SP MAY use this field to + // ensure subsequent volume validation and publishing calls have + // contextual information. + // The contents of this field SHALL be opaque to a CO. + // The contents of this field SHALL NOT be mutable. + // The contents of this field SHALL be safe for the CO to cache. + // The contents of this field SHOULD NOT contain sensitive + // information. + // The contents of this field SHOULD NOT be used for uniquely + // identifying a volume. The `volume_id` alone SHOULD be sufficient to + // identify the volume. + // A volume uniquely identified by `volume_id` SHALL always report the + // same volume_context. + // This field is OPTIONAL and when present MUST be passed to volume + // validation and publishing calls. + map<string, string> volume_context = 3; + + // If specified, indicates that the volume is not empty and is + // pre-populated with data from the specified source. + // This field is OPTIONAL. + VolumeContentSource content_source = 4; + + // Specifies where (regions, zones, racks, etc.) the provisioned + // volume is accessible from. 
+ // A plugin that returns this field MUST also set the + // VOLUME_ACCESSIBILITY_CONSTRAINTS plugin capability. + // An SP MAY specify multiple topologies to indicate the volume is + // accessible from multiple locations. + // COs MAY use this information along with the topology information + // returned by NodeGetInfo to ensure that a given volume is accessible + // from a given node when scheduling workloads. + // This field is OPTIONAL. If it is not specified, the CO MAY assume + // the volume is equally accessible from all nodes in the cluster and + // MAY schedule workloads referencing the volume on any available + // node. + // + // Example 1: + // accessible_topology = {"region": "R1", "zone": "Z2"} + // Indicates a volume accessible only from the "region" "R1" and the + // "zone" "Z2". + // + // Example 2: + // accessible_topology = + // {"region": "R1", "zone": "Z2"}, + // {"region": "R1", "zone": "Z3"} + // Indicates a volume accessible from both "zone" "Z2" and "zone" "Z3" + // in the "region" "R1". + repeated Topology accessible_topology = 5; +} + +message TopologyRequirement { + // Specifies the list of topologies the provisioned volume MUST be + // accessible from. + // This field is OPTIONAL. If TopologyRequirement is specified either + // requisite or preferred or both MUST be specified. + // + // If requisite is specified, the provisioned volume MUST be + // accessible from at least one of the requisite topologies. + // + // Given + // x = number of topologies provisioned volume is accessible from + // n = number of requisite topologies + // The CO MUST ensure n >= 1. The SP MUST ensure x >= 1 + // If x==n, then the SP MUST make the provisioned volume available to + // all topologies from the list of requisite topologies. If it is + // unable to do so, the SP MUST fail the CreateVolume call. 
+ // For example, if a volume should be accessible from a single zone, + // and requisite = + // {"region": "R1", "zone": "Z2"} + // then the provisioned volume MUST be accessible from the "region" + // "R1" and the "zone" "Z2". + // Similarly, if a volume should be accessible from two zones, and + // requisite = + // {"region": "R1", "zone": "Z2"}, + // {"region": "R1", "zone": "Z3"} + // then the provisioned volume MUST be accessible from the "region" + // "R1" and both "zone" "Z2" and "zone" "Z3". + // + // If x<n, then the SP SHALL choose x unique topologies from the list + // of requisite topologies. If it is unable to do so, the SP MUST + // fail the CreateVolume call. + // For example, if a volume should be accessible from a single zone, + // and requisite = + // {"region": "R1", "zone": "Z2"}, + // {"region": "R1", "zone": "Z3"} + // then the SP may choose to make the provisioned volume accessible in + // either the "zone" "Z2" or the "zone" "Z3" in the "region" "R1". + // Similarly, if a volume should be accessible from two zones, and + // requisite = + // {"region": "R1", "zone": "Z2"}, + // {"region": "R1", "zone": "Z3"}, + // {"region": "R1", "zone": "Z4"} + // then the provisioned volume MUST be accessible from any combination + // of two unique topologies: e.g. "R1/Z2" and "R1/Z3", or "R1/Z2" and + // "R1/Z4", or "R1/Z3" and "R1/Z4". + // + // If x>n, then the SP MUST make the provisioned volume available from + // all topologies from the list of requisite topologies and MAY choose + // the remaining x-n unique topologies from the list of all possible + // topologies. If it is unable to do so, the SP MUST fail the + // CreateVolume call. + // For example, if a volume should be accessible from two zones, and + // requisite = + // {"region": "R1", "zone": "Z2"} + // then the provisioned volume MUST be accessible from the "region" + // "R1" and the "zone" "Z2" and the SP may select the second zone + // independently, e.g. "R1/Z4". + repeated Topology requisite = 1; + + // Specifies the list of topologies the CO would prefer the volume to + // be provisioned in. + // + // This field is OPTIONAL. If TopologyRequirement is specified either + // requisite or preferred or both MUST be specified. + // + // An SP MUST attempt to make the provisioned volume available using + // the preferred topologies in order from first to last. + // + // If requisite is specified, all topologies in preferred list MUST + // also be present in the list of requisite topologies. + // + // If the SP is unable to to make the provisioned volume available + // from any of the preferred topologies, the SP MAY choose a topology + // from the list of requisite topologies. + // If the list of requisite topologies is not specified, then the SP + // MAY choose from the list of all possible topologies. 
+  // If the list of requisite topologies is specified and the SP is
+  // unable to make the provisioned volume available from any of the
+  // requisite topologies it MUST fail the CreateVolume call.
+  //
+  // Example 1:
+  // Given a volume should be accessible from a single zone, and
+  // requisite =
+  //   {"region": "R1", "zone": "Z2"},
+  //   {"region": "R1", "zone": "Z3"}
+  // preferred =
+  //   {"region": "R1", "zone": "Z3"}
+  // then the SP SHOULD first attempt to make the provisioned volume
+  // available from "zone" "Z3" in the "region" "R1" and fall back to
+  // "zone" "Z2" in the "region" "R1" if that is not possible.
+  //
+  // Example 2:
+  // Given a volume should be accessible from a single zone, and
+  // requisite =
+  //   {"region": "R1", "zone": "Z2"},
+  //   {"region": "R1", "zone": "Z3"},
+  //   {"region": "R1", "zone": "Z4"},
+  //   {"region": "R1", "zone": "Z5"}
+  // preferred =
+  //   {"region": "R1", "zone": "Z4"},
+  //   {"region": "R1", "zone": "Z2"}
+  // then the SP SHOULD first attempt to make the provisioned volume
+  // accessible from "zone" "Z4" in the "region" "R1" and fall back to
+  // "zone" "Z2" in the "region" "R1" if that is not possible. If that
+  // is not possible, the SP may choose between either the "zone"
+  // "Z3" or "Z5" in the "region" "R1".
+  //
+  // Example 3:
+  // Given a volume should be accessible from TWO zones (because an
+  // opaque parameter in CreateVolumeRequest, for example, specifies
+  // the volume is accessible from two zones, aka synchronously
+  // replicated), and
+  // requisite =
+  //   {"region": "R1", "zone": "Z2"},
+  //   {"region": "R1", "zone": "Z3"},
+  //   {"region": "R1", "zone": "Z4"},
+  //   {"region": "R1", "zone": "Z5"}
+  // preferred =
+  //   {"region": "R1", "zone": "Z5"},
+  //   {"region": "R1", "zone": "Z3"}
+  // then the SP SHOULD first attempt to make the provisioned volume
+  // accessible from the combination of the two "zones" "Z5" and "Z3" in
+  // the "region" "R1". If that's not possible, it should fall back to
+  // a combination of "Z5" and other possibilities from the list of
+  // requisite. If that's not possible, it should fall back to a
+  // combination of "Z3" and other possibilities from the list of
+  // requisite. If that's not possible, it should fall back to a
+  // combination of other possibilities from the list of requisite.
+  repeated Topology preferred = 2;
+}
+
+// Topology is a map of topological domains to topological segments.
+// A topological domain is a sub-division of a cluster, like "region",
+// "zone", "rack", etc.
+// A topological segment is a specific instance of a topological domain,
+// like "zone3", "rack3", etc.
+// For example {"com.company/zone": "Z1", "com.company/rack": "R3"}
+// Valid keys have two segments: an OPTIONAL prefix and name, separated
+// by a slash (/), for example: "com.company.example/zone".
+// The key name segment is REQUIRED. The prefix is OPTIONAL.
+// The key name MUST be 63 characters or less, begin and end with an
+// alphanumeric character ([a-z0-9A-Z]), and contain only dashes (-),
+// underscores (_), dots (.), or alphanumerics in between, for example
+// "zone".
+// The key prefix MUST be 63 characters or less, begin and end with a
+// lower-case alphanumeric character ([a-z0-9]), contain only
+// dashes (-), dots (.), or lower-case alphanumerics in between, and
+// follow domain name notation format
+// (https://tools.ietf.org/html/rfc1035#section-2.3.1).
+// The key prefix SHOULD include the plugin's host company name and/or
+// the plugin name, to minimize the possibility of collisions with keys
+// from other plugins.
+// If a key prefix is specified, it MUST be identical across all
+// topology keys returned by the SP (across all RPCs).
+// Keys MUST be case-insensitive. Meaning the keys "Zone" and "zone"
+// MUST not both exist.
+// Each value (topological segment) MUST contain 1 or more strings.
+// Each string MUST be 63 characters or less and begin and end with an
+// alphanumeric character with '-', '_', '.', or alphanumerics in
+// between.
+message Topology {
+  map<string, string> segments = 1;
+}
+message DeleteVolumeRequest {
+  // The ID of the volume to be deprovisioned.
+  // This field is REQUIRED.
+  string volume_id = 1;
+
+  // Secrets required by plugin to complete volume deletion request.
+  // This field is OPTIONAL. Refer to the `Secrets Requirements`
+  // section on how to use this field.
+  map<string, string> secrets = 2 [(csi_secret) = true];
+}
+
+message DeleteVolumeResponse {
+  // Intentionally empty.
+}
+message ControllerPublishVolumeRequest {
+  // The ID of the volume to be used on a node.
+  // This field is REQUIRED.
+  string volume_id = 1;
+
+  // The ID of the node. This field is REQUIRED. The CO SHALL set this
+  // field to match the node ID returned by `NodeGetInfo`.
+  string node_id = 2;
+
+  // Volume capability describing how the CO intends to use this volume.
+  // SP MUST ensure the CO can use the published volume as described.
+  // Otherwise SP MUST return the appropriate gRPC error code.
+  // This is a REQUIRED field.
+  VolumeCapability volume_capability = 3;
+
+  // Indicates SP MUST publish the volume in readonly mode.
+  // CO MUST set this field to false if SP does not have the
+  // PUBLISH_READONLY controller capability.
+  // This is a REQUIRED field.
+  bool readonly = 4;
+
+  // Secrets required by plugin to complete controller publish volume
+  // request. This field is OPTIONAL. Refer to the
+  // `Secrets Requirements` section on how to use this field.
+  map<string, string> secrets = 5 [(csi_secret) = true];
+
+  // Volume context as returned by CO in CreateVolumeRequest. This field
+  // is OPTIONAL and MUST match the volume_context of the volume
+  // identified by `volume_id`.
+  map<string, string> volume_context = 6;
+}
+
+message ControllerPublishVolumeResponse {
+  // Opaque static publish properties of the volume. SP MAY use this
+  // field to ensure subsequent `NodeStageVolume` or `NodePublishVolume`
+  // calls have contextual information.
+  // The contents of this field SHALL be opaque to a CO.
+  // The contents of this field SHALL NOT be mutable.
+  // The contents of this field SHALL be safe for the CO to cache.
+  // The contents of this field SHOULD NOT contain sensitive
+  // information.
+  // The contents of this field SHOULD NOT be used for uniquely
+  // identifying a volume. The `volume_id` alone SHOULD be sufficient to
+  // identify the volume.
+  // This field is OPTIONAL and when present MUST be passed to
+  // subsequent `NodeStageVolume` or `NodePublishVolume` calls
+  map<string, string> publish_context = 1;
+}
+message ControllerUnpublishVolumeRequest {
+  // The ID of the volume. This field is REQUIRED.
+  string volume_id = 1;
+
+  // The ID of the node. This field is OPTIONAL. The CO SHOULD set this
+  // field to match the node ID returned by `NodeGetInfo` or leave it
+  // unset. If the value is set, the SP MUST unpublish the volume from
+  // the specified node. If the value is unset, the SP MUST unpublish
+  // the volume from all nodes it is published to.
+  string node_id = 2;
+
+  // Secrets required by plugin to complete controller unpublish volume
+  // request. This SHOULD be the same secrets passed to the
+  // ControllerPublishVolume call for the specified volume.
+  // This field is OPTIONAL. Refer to the `Secrets Requirements`
+  // section on how to use this field.
+  map<string, string> secrets = 3 [(csi_secret) = true];
+}
+
+message ControllerUnpublishVolumeResponse {
+  // Intentionally empty.
+}
+message ValidateVolumeCapabilitiesRequest {
+  // The ID of the volume to check. This field is REQUIRED.
+  string volume_id = 1;
+
+  // Volume context as returned by CO in CreateVolumeRequest. This field
+  // is OPTIONAL and MUST match the volume_context of the volume
+  // identified by `volume_id`.
+  map<string, string> volume_context = 2;
+
+  // The capabilities that the CO wants to check for the volume. This
+  // call SHALL return "confirmed" only if all the volume capabilities
+  // specified below are supported. This field is REQUIRED.
+  repeated VolumeCapability volume_capabilities = 3;
+
+  // See CreateVolumeRequest.parameters.
+  // This field is OPTIONAL.
+  map<string, string> parameters = 4;
+
+  // Secrets required by plugin to complete volume validation request.
+  // This field is OPTIONAL. Refer to the `Secrets Requirements`
+  // section on how to use this field.
+  map<string, string> secrets = 5 [(csi_secret) = true];
+}
+
+message ValidateVolumeCapabilitiesResponse {
+  message Confirmed {
+    // Volume context validated by the plugin.
+    // This field is OPTIONAL.
+    map<string, string> volume_context = 1;
+
+    // Volume capabilities supported by the plugin.
+    // This field is REQUIRED.
+    repeated VolumeCapability volume_capabilities = 2;
+
+    // The volume creation parameters validated by the plugin.
+    // This field is OPTIONAL.
+    map<string, string> parameters = 3;
+  }
+
+  // Confirmed indicates to the CO the set of capabilities that the
+  // plugin has validated. This field SHALL only be set to a non-empty
+  // value for successful validation responses.
+  // For successful validation responses, the CO SHALL compare the
+  // fields of this message to the originally requested capabilities in
+  // order to guard against an older plugin reporting "valid" for newer
+  // capability fields that it does not yet understand.
+  // This field is OPTIONAL.
+  Confirmed confirmed = 1;
+
+  // Message to the CO if `confirmed` above is empty. This field is
+  // OPTIONAL.
+  // An empty string is equal to an unspecified field value.
+  string message = 2;
+}
+message ListVolumesRequest {
+  // If specified (non-zero value), the Plugin MUST NOT return more
+  // entries than this number in the response. If the actual number of
+  // entries is more than this number, the Plugin MUST set `next_token`
+  // in the response which can be used to get the next page of entries
+  // in the subsequent `ListVolumes` call. This field is OPTIONAL. If
+  // not specified (zero value), it means there is no restriction on the
+  // number of entries that can be returned.
+  // The value of this field MUST NOT be negative.
+  int32 max_entries = 1;
+
+  // A token to specify where to start paginating. Set this field to
+  // `next_token` returned by a previous `ListVolumes` call to get the
+  // next page of entries. This field is OPTIONAL.
+  // An empty string is equal to an unspecified field value.
+  string starting_token = 2;
+}
+
+message ListVolumesResponse {
+  message VolumeStatus{
+    // A list of all `node_id` of nodes that the volume in this entry
+    // is controller published on.
+    // This field is OPTIONAL. If it is not specified and the SP has
+    // the LIST_VOLUMES_PUBLISHED_NODES controller capability, the CO
+    // MAY assume the volume is not controller published to any nodes.
+    // If the field is not specified and the SP does not have the
+    // LIST_VOLUMES_PUBLISHED_NODES controller capability, the CO MUST
+    // not interpret this field.
+    // published_node_ids MAY include nodes not published to or
+    // reported by the SP. The CO MUST be resilient to that.
+    repeated string published_node_ids = 1;
+  }
+
+  message Entry {
+    // This field is REQUIRED
+    Volume volume = 1;
+
+    // This field is OPTIONAL. This field MUST be specified if the
+    // LIST_VOLUMES_PUBLISHED_NODES controller capability is
+    // supported.
+    VolumeStatus status = 2;
+  }
+
+  repeated Entry entries = 1;
+
+  // This token allows you to get the next page of entries for
+  // `ListVolumes` request. If the number of entries is larger than
+  // `max_entries`, use the `next_token` as a value for the
+  // `starting_token` field in the next `ListVolumes` request. This
+  // field is OPTIONAL.
+  // An empty string is equal to an unspecified field value.
+  string next_token = 2;
+}
+message GetCapacityRequest {
+  // If specified, the Plugin SHALL report the capacity of the storage
+  // that can be used to provision volumes that satisfy ALL of the
+  // specified `volume_capabilities`. These are the same
+  // `volume_capabilities` the CO will use in `CreateVolumeRequest`.
+  // This field is OPTIONAL.
+  repeated VolumeCapability volume_capabilities = 1;
+
+  // If specified, the Plugin SHALL report the capacity of the storage
+  // that can be used to provision volumes with the given Plugin
+  // specific `parameters`. These are the same `parameters` the CO will
+  // use in `CreateVolumeRequest`. This field is OPTIONAL.
+  map<string, string> parameters = 2;
+
+  // If specified, the Plugin SHALL report the capacity of the storage
+  // that can be used to provision volumes that in the specified
+  // `accessible_topology`. This is the same as the
+  // `accessible_topology` the CO returns in a `CreateVolumeResponse`.
+  // This field is OPTIONAL. This field SHALL NOT be set unless the
+  // plugin advertises the VOLUME_ACCESSIBILITY_CONSTRAINTS capability.
+  Topology accessible_topology = 3;
+}
+
+message GetCapacityResponse {
+  // The available capacity, in bytes, of the storage that can be used
+  // to provision volumes. If `volume_capabilities` or `parameters` is
+  // specified in the request, the Plugin SHALL take those into
+  // consideration when calculating the available capacity of the
+  // storage. This field is REQUIRED.
+  // The value of this field MUST NOT be negative.
+  int64 available_capacity = 1;
+}
+message ControllerGetCapabilitiesRequest {
+  // Intentionally empty.
+}
+
+message ControllerGetCapabilitiesResponse {
+  // All the capabilities that the controller service supports. This
+  // field is OPTIONAL.
+  repeated ControllerServiceCapability capabilities = 1;
+}
+
+// Specifies a capability of the controller service.
+message ControllerServiceCapability {
+  message RPC {
+    enum Type {
+      UNKNOWN = 0;
+      CREATE_DELETE_VOLUME = 1;
+      PUBLISH_UNPUBLISH_VOLUME = 2;
+      LIST_VOLUMES = 3;
+      GET_CAPACITY = 4;
+      // Currently the only way to consume a snapshot is to create
+      // a volume from it. Therefore plugins supporting
+      // CREATE_DELETE_SNAPSHOT MUST support creating volume from
+      // snapshot.
+      CREATE_DELETE_SNAPSHOT = 5;
+      LIST_SNAPSHOTS = 6;
+
+      // Plugins supporting volume cloning at the storage level MAY
+      // report this capability. The source volume MUST be managed by
+      // the same plugin. Not all volume sources and parameters
+      // combinations MAY work.
+      CLONE_VOLUME = 7;
+
+      // Indicates the SP supports ControllerPublishVolume.readonly
+      // field.
+      PUBLISH_READONLY = 8;
+
+      // See VolumeExpansion for details.
+      EXPAND_VOLUME = 9;
+
+      // Indicates the SP supports the
+      // ListVolumesResponse.entry.published_nodes field
+      LIST_VOLUMES_PUBLISHED_NODES = 10;
+    }
+
+    Type type = 1;
+  }
+
+  oneof type {
+    // RPC that the controller supports.
+    RPC rpc = 1;
+  }
+}
+message CreateSnapshotRequest {
+  // The ID of the source volume to be snapshotted.
+  // This field is REQUIRED.
+  string source_volume_id = 1;
+
+  // The suggested name for the snapshot. This field is REQUIRED for
+  // idempotency.
+  // Any Unicode string that conforms to the length limit is allowed
+  // except those containing the following banned characters:
+  // U+0000-U+0008, U+000B, U+000C, U+000E-U+001F, U+007F-U+009F.
+  // (These are control characters other than commonly used whitespace.)
+  string name = 2;
+
+  // Secrets required by plugin to complete snapshot creation request.
+  // This field is OPTIONAL. Refer to the `Secrets Requirements`
+  // section on how to use this field.
+  map<string, string> secrets = 3 [(csi_secret) = true];
+
+  // Plugin specific parameters passed in as opaque key-value pairs.
+  // This field is OPTIONAL. The Plugin is responsible for parsing and
+  // validating these parameters. COs will treat these as opaque.
+  // Use cases for opaque parameters:
+  // - Specify a policy to automatically clean up the snapshot.
+  // - Specify an expiration date for the snapshot.
+  // - Specify whether the snapshot is readonly or read/write.
+  // - Specify if the snapshot should be replicated to some place.
+  // - Specify primary or secondary for replication systems that
+  //   support snapshotting only on primary.
+  map<string, string> parameters = 4;
+}
+
+message CreateSnapshotResponse {
+  // Contains all attributes of the newly created snapshot that are
+  // relevant to the CO along with information required by the Plugin
+  // to uniquely identify the snapshot. This field is REQUIRED.
+  Snapshot snapshot = 1;
+}
+
+// Information about a specific snapshot.
+message Snapshot {
+  // This is the complete size of the snapshot in bytes. The purpose of
+  // this field is to give CO guidance on how much space is needed to
+  // create a volume from this snapshot. The size of the volume MUST NOT
+  // be less than the size of the source snapshot. This field is
+  // OPTIONAL. If this field is not set, it indicates that this size is
+  // unknown. The value of this field MUST NOT be negative and a size of
+  // zero means it is unspecified.
+  int64 size_bytes = 1;
+
+  // The identifier for this snapshot, generated by the plugin.
+  // This field is REQUIRED.
+  // This field MUST contain enough information to uniquely identify
+  // this specific snapshot vs all other snapshots supported by this
+  // plugin.
+  // This field SHALL be used by the CO in subsequent calls to refer to
+  // this snapshot.
+  // The SP is NOT responsible for global uniqueness of snapshot_id
+  // across multiple SPs.
+  string snapshot_id = 2;
+
+  // Identity information for the source volume. Note that creating a
+  // snapshot from a snapshot is not supported here so the source has to
+  // be a volume. This field is REQUIRED.
+  string source_volume_id = 3;
+
+  // Timestamp when the point-in-time snapshot is taken on the storage
+  // system. This field is REQUIRED.
+  .google.protobuf.Timestamp creation_time = 4;
+
+  // Indicates if a snapshot is ready to use as a
+  // `volume_content_source` in a `CreateVolumeRequest`. The default
+  // value is false. This field is REQUIRED.
+  bool ready_to_use = 5;
+}
+message DeleteSnapshotRequest {
+  // The ID of the snapshot to be deleted.
+  // This field is REQUIRED.
+  string snapshot_id = 1;
+
+  // Secrets required by plugin to complete snapshot deletion request.
+  // This field is OPTIONAL. Refer to the `Secrets Requirements`
+  // section on how to use this field.
+  map<string, string> secrets = 2 [(csi_secret) = true];
+}
+
+message DeleteSnapshotResponse {}
+// List all snapshots on the storage system regardless of how they were
+// created.
+message ListSnapshotsRequest {
+  // If specified (non-zero value), the Plugin MUST NOT return more
+  // entries than this number in the response. If the actual number of
+  // entries is more than this number, the Plugin MUST set `next_token`
+  // in the response which can be used to get the next page of entries
+  // in the subsequent `ListSnapshots` call. This field is OPTIONAL. If
+  // not specified (zero value), it means there is no restriction on the
+  // number of entries that can be returned.
+  // The value of this field MUST NOT be negative.
+  int32 max_entries = 1;
+
+  // A token to specify where to start paginating. Set this field to
+  // `next_token` returned by a previous `ListSnapshots` call to get the
+  // next page of entries. This field is OPTIONAL.
+  // An empty string is equal to an unspecified field value.
+  string starting_token = 2;
+
+  // Identity information for the source volume. This field is OPTIONAL.
+  // It can be used to list snapshots by volume.
+  string source_volume_id = 3;
+
+  // Identity information for a specific snapshot. This field is
+  // OPTIONAL. It can be used to list only a specific snapshot.
+  // ListSnapshots will return with current snapshot information
+  // and will not block if the snapshot is being processed after
+  // it is cut.
+  string snapshot_id = 4;
+
+  // Secrets required by plugin to complete ListSnapshot request.
+  // This field is OPTIONAL. Refer to the `Secrets Requirements`
+  // section on how to use this field.
+  map<string, string> secrets = 5 [(csi_secret) = true];
+}
+
+message ListSnapshotsResponse {
+  message Entry {
+    Snapshot snapshot = 1;
+  }
+
+  repeated Entry entries = 1;
+
+  // This token allows you to get the next page of entries for
+  // `ListSnapshots` request. If the number of entries is larger than
+  // `max_entries`, use the `next_token` as a value for the
+  // `starting_token` field in the next `ListSnapshots` request. This
+  // field is OPTIONAL.
+  // An empty string is equal to an unspecified field value.
+  string next_token = 2;
+}
+message ControllerExpandVolumeRequest {
+  // The ID of the volume to expand. This field is REQUIRED.
+  string volume_id = 1;
+
+  // This allows CO to specify the capacity requirements of the volume
+  // after expansion. This field is REQUIRED.
+  CapacityRange capacity_range = 2;
+
+  // Secrets required by the plugin for expanding the volume.
+  // This field is OPTIONAL.
+  map<string, string> secrets = 3 [(csi_secret) = true];
+
+  // Volume capability describing how the CO intends to use this volume.
+  // This allows SP to determine if volume is being used as a block
+  // device or mounted file system. For example - if volume is
+  // being used as a block device - the SP MAY set
+  // node_expansion_required to false in ControllerExpandVolumeResponse
+  // to skip invocation of NodeExpandVolume on the node by the CO.
+  // This is an OPTIONAL field.
+  VolumeCapability volume_capability = 4;
+}
+
+message ControllerExpandVolumeResponse {
+  // Capacity of volume after expansion. This field is REQUIRED.
+  int64 capacity_bytes = 1;
+
+  // Whether node expansion is required for the volume. When true
+  // the CO MUST make NodeExpandVolume RPC call on the node. This field
+  // is REQUIRED.
+  bool node_expansion_required = 2;
+}
+message NodeStageVolumeRequest {
+  // The ID of the volume to publish. This field is REQUIRED.
+  string volume_id = 1;
+
+  // The CO SHALL set this field to the value returned by
+  // `ControllerPublishVolume` if the corresponding Controller Plugin
+  // has `PUBLISH_UNPUBLISH_VOLUME` controller capability, and SHALL be
+  // left unset if the corresponding Controller Plugin does not have
+  // this capability. This is an OPTIONAL field.
+  map<string, string> publish_context = 2;
+
+  // The path to which the volume MAY be staged. It MUST be an
+  // absolute path in the root filesystem of the process serving this
+  // request, and MUST be a directory. The CO SHALL ensure that there
+  // is only one `staging_target_path` per volume. The CO SHALL ensure
+  // that the path is directory and that the process serving the
+  // request has `read` and `write` permission to that directory. The
+  // CO SHALL be responsible for creating the directory if it does not
+  // exist.
+  // This is a REQUIRED field.
+  string staging_target_path = 3;
+
+  // Volume capability describing how the CO intends to use this volume.
+  // SP MUST ensure the CO can use the staged volume as described.
+  // Otherwise SP MUST return the appropriate gRPC error code.
+  // This is a REQUIRED field.
+  VolumeCapability volume_capability = 4;
+
+  // Secrets required by plugin to complete node stage volume request.
+  // This field is OPTIONAL. Refer to the `Secrets Requirements`
+  // section on how to use this field.
+  map<string, string> secrets = 5 [(csi_secret) = true];
+
+  // Volume context as returned by CO in CreateVolumeRequest. This field
+  // is OPTIONAL and MUST match the volume_context of the volume
+  // identified by `volume_id`.
+  map<string, string> volume_context = 6;
+}
+
+message NodeStageVolumeResponse {
+  // Intentionally empty.
+}
+message NodeUnstageVolumeRequest {
+  // The ID of the volume. This field is REQUIRED.
+  string volume_id = 1;
+
+  // The path at which the volume was staged. It MUST be an absolute
+  // path in the root filesystem of the process serving this request.
+  // This is a REQUIRED field.
+  string staging_target_path = 2;
+}
+
+message NodeUnstageVolumeResponse {
+  // Intentionally empty.
+}
+message NodePublishVolumeRequest {
+  // The ID of the volume to publish. This field is REQUIRED.
+  string volume_id = 1;
+
+  // The CO SHALL set this field to the value returned by
+  // `ControllerPublishVolume` if the corresponding Controller Plugin
+  // has `PUBLISH_UNPUBLISH_VOLUME` controller capability, and SHALL be
+  // left unset if the corresponding Controller Plugin does not have
+  // this capability. This is an OPTIONAL field.
+  map<string, string> publish_context = 2;
+
+  // The path to which the volume was staged by `NodeStageVolume`.
+  // It MUST be an absolute path in the root filesystem of the process
+  // serving this request.
+  // It MUST be set if the Node Plugin implements the
+  // `STAGE_UNSTAGE_VOLUME` node capability.
+  // This is an OPTIONAL field.
+  string staging_target_path = 3;
+
+  // The path to which the volume will be published. It MUST be an
+  // absolute path in the root filesystem of the process serving this
+  // request. The CO SHALL ensure uniqueness of target_path per volume.
+  // The CO SHALL ensure that the parent directory of this path exists
+  // and that the process serving the request has `read` and `write`
+  // permissions to that parent directory.
+  // For volumes with an access type of block, the SP SHALL place the
+  // block device at target_path.
+  // For volumes with an access type of mount, the SP SHALL place the
+  // mounted directory at target_path.
+  // Creation of target_path is the responsibility of the SP.
+  // This is a REQUIRED field.
+  string target_path = 4;
+
+  // Volume capability describing how the CO intends to use this volume.
+  // SP MUST ensure the CO can use the published volume as described.
+  // Otherwise SP MUST return the appropriate gRPC error code.
+  // This is a REQUIRED field.
+  VolumeCapability volume_capability = 5;
+
+  // Indicates SP MUST publish the volume in readonly mode.
+  // This field is REQUIRED.
+  bool readonly = 6;
+
+  // Secrets required by plugin to complete node publish volume request.
+  // This field is OPTIONAL. Refer to the `Secrets Requirements`
+  // section on how to use this field.
+  map<string, string> secrets = 7 [(csi_secret) = true];
+
+  // Volume context as returned by CO in CreateVolumeRequest. This field
+  // is OPTIONAL and MUST match the volume_context of the volume
+  // identified by `volume_id`.
+  map<string, string> volume_context = 8;
+}
+
+message NodePublishVolumeResponse {
+  // Intentionally empty.
+}
+message NodeUnpublishVolumeRequest {
+  // The ID of the volume. This field is REQUIRED.
+  string volume_id = 1;
+
+  // The path at which the volume was published. It MUST be an absolute
+  // path in the root filesystem of the process serving this request.
+  // The SP MUST delete the file or directory it created at this path.
+  // This is a REQUIRED field.
+  string target_path = 2;
+}
+
+message NodeUnpublishVolumeResponse {
+  // Intentionally empty.
+}
+message NodeGetVolumeStatsRequest {
+  // The ID of the volume. This field is REQUIRED.
+  string volume_id = 1;
+
+  // It can be any valid path where volume was previously
+  // staged or published.
+  // It MUST be an absolute path in the root filesystem of
+  // the process serving this request.
+  // This is a REQUIRED field.
+  string volume_path = 2;
+
+  // The path where the volume is staged, if the plugin has the
+  // STAGE_UNSTAGE_VOLUME capability, otherwise empty.
+  // If not empty, it MUST be an absolute path in the root
+  // filesystem of the process serving this request.
+  // This field is OPTIONAL.
+ string staging_target_path = 3; +} + +message NodeGetVolumeStatsResponse { + // This field is OPTIONAL. + repeated VolumeUsage usage = 1; +} + +message VolumeUsage { + enum Unit { + UNKNOWN = 0; + BYTES = 1; + INODES = 2; + } + // The available capacity in specified Unit. This field is OPTIONAL. + // The value of this field MUST NOT be negative. + int64 available = 1; + + // The total capacity in specified Unit. This field is REQUIRED. + // The value of this field MUST NOT be negative. + int64 total = 2; + + // The used capacity in specified Unit. This field is OPTIONAL. + // The value of this field MUST NOT be negative. + int64 used = 3; + + // Units by which values are measured. This field is REQUIRED. + Unit unit = 4; +} +message NodeGetCapabilitiesRequest { + // Intentionally empty. +} + +message NodeGetCapabilitiesResponse { + // All the capabilities that the node service supports. This field + // is OPTIONAL. + repeated NodeServiceCapability capabilities = 1; +} + +// Specifies a capability of the node service. +message NodeServiceCapability { + message RPC { + enum Type { + UNKNOWN = 0; + STAGE_UNSTAGE_VOLUME = 1; + // If Plugin implements GET_VOLUME_STATS capability + // then it MUST implement NodeGetVolumeStats RPC + // call for fetching volume statistics. + GET_VOLUME_STATS = 2; + // See VolumeExpansion for details. + EXPAND_VOLUME = 3; + } + + Type type = 1; + } + + oneof type { + // RPC that the controller supports. + RPC rpc = 1; + } +} +message NodeGetInfoRequest { +} + +message NodeGetInfoResponse { + // The identifier of the node as understood by the SP. + // This field is REQUIRED. + // This field MUST contain enough information to uniquely identify + // this specific node vs all other nodes supported by this plugin. + // This field SHALL be used by the CO in subsequent calls, including + // `ControllerPublishVolume`, to refer to this node. + // The SP is NOT responsible for global uniqueness of node_id across + // multiple SPs. 
+ string node_id = 1; + + // Maximum number of volumes that controller can publish to the node. + // If value is not set or zero CO SHALL decide how many volumes of + // this type can be published by the controller to the node. The + // plugin MUST NOT set negative values here. + // This field is OPTIONAL. + int64 max_volumes_per_node = 2; + + // Specifies where (regions, zones, racks, etc.) the node is + // accessible from. + // A plugin that returns this field MUST also set the + // VOLUME_ACCESSIBILITY_CONSTRAINTS plugin capability. + // COs MAY use this information along with the topology information + // returned in CreateVolumeResponse to ensure that a given volume is + // accessible from a given node when scheduling workloads. + // This field is OPTIONAL. If it is not specified, the CO MAY assume + // the node is not subject to any topological constraint, and MAY + // schedule workloads that reference any volume V, such that there are + // no topological constraints declared for V. + // + // Example 1: + // accessible_topology = + // {"region": "R1", "zone": "Z2"} + // Indicates the node exists within the "region" "R1" and the "zone" + // "Z2". + Topology accessible_topology = 3; +} +message NodeExpandVolumeRequest { + // The ID of the volume. This field is REQUIRED. + string volume_id = 1; + + // The path on which volume is available. This field is REQUIRED. + string volume_path = 2; + + // This allows CO to specify the capacity requirements of the volume + // after expansion. If capacity_range is omitted then a plugin MAY + // inspect the file system of the volume to determine the maximum + // capacity to which the volume can be expanded. In such cases a + // plugin MAY expand the volume to its maximum capacity. + // This field is OPTIONAL. + CapacityRange capacity_range = 3; + + // The path where the volume is staged, if the plugin has the + // STAGE_UNSTAGE_VOLUME capability, otherwise empty. 
+ // If not empty, it MUST be an absolute path in the root + // filesystem of the process serving this request. + // This field is OPTIONAL. + string staging_target_path = 4; + + // Volume capability describing how the CO intends to use this volume. + // This allows SP to determine if volume is being used as a block + // device or mounted file system. For example - if volume is being + // used as a block device the SP MAY choose to skip expanding the + // filesystem in NodeExpandVolume implementation but still perform + // rest of the housekeeping needed for expanding the volume. If + // volume_capability is omitted the SP MAY determine + // access_type from given volume_path for the volume and perform + // node expansion. This is an OPTIONAL field. + VolumeCapability volume_capability = 5; +} + +message NodeExpandVolumeResponse { + // The capacity of the volume in bytes. This field is OPTIONAL. + int64 capacity_bytes = 1; +} diff --git a/docker/iscsiadm b/docker/iscsiadm new file mode 100644 index 0000000..cce2b01 --- /dev/null +++ b/docker/iscsiadm @@ -0,0 +1,5 @@ +#!/bin/bash + +# https://engineering.docker.com/2019/07/road-to-containing-iscsi/ + +chroot /host /usr/bin/env -i PATH="/sbin:/bin:/usr/bin" iscsiadm "${@:1}" diff --git a/examples/freenas-iscsi.yaml b/examples/freenas-iscsi.yaml new file mode 100644 index 0000000..1ab158f --- /dev/null +++ b/examples/freenas-iscsi.yaml @@ -0,0 +1,59 @@ +httpConnection: + protocol: http + host: server address + port: 80 + username: root + password: + allowInsecure: true +sshConnection: + host: server address + port: 22 + username: root + # use either password or key + password: "" + privateKey: | + -----BEGIN RSA PRIVATE KEY----- + ... 
+ -----END RSA PRIVATE KEY----- +zfs: + # total volume name (zvol/<datasetParentName>/<volume name>) length cannot exceed 63 chars + # https://www.ixsystems.com/documentation/freenas/11.2-U5/storage.html#zfs-zvol-config-opts-tab + # standard volume naming overhead is 46 chars + # datasetParentName should therefore be 17 chars or less + datasetParentName: tank/k8s/b/vols + detachedSnapshotsDatasetParentName: tank/k8s/b/snaps + # "" (inherit), lz4, gzip-9, etc + zvolCompression: + # "" (inherit), on, off, verify + zvolDedup: + zvolEnableReservation: false + # 512, 1K, 2K, 4K, 8K, 16K, 64K, 128K default is 16K + zvolBlocksize: +iscsi: + targetPortal: "server:3261" + targetPortals: [] + # leave empty to omit usage of -I with iscsiadm + interface: + namePrefix: csi- + nameSuffix: "-clustera" + # add as many as needed + targetGroups: + # get the correct ID from the "portal" section in the UI + - targetGroupPortalGroup: 1 + # get the correct ID from the "initiators" section in the UI + targetGroupInitiatorGroup: 1 + # None, CHAP, or CHAP Mutual + targetGroupAuthType: None + # get the correct ID from the "Authorized Access" section of the UI + # only required if using CHAP + targetGroupAuthGroup: + + extentInsecureTpc: true + extentXenCompat: false + extentDisablePhysicalBlocksize: true + # 512, 1024, 2048, or 4096 + extentBlocksize: 512 + # "" (let FreeNAS decide, currently defaults to SSD), Unknown, SSD, 5400, 7200, 10000, 15000 + extentRpm: "SSD" + # 0-100 (0 == ignore) + extentAvailThreshold: 0 diff --git a/examples/freenas-nfs.yaml b/examples/freenas-nfs.yaml new file mode 100644 index 0000000..b9c8479 --- /dev/null +++ b/examples/freenas-nfs.yaml @@ -0,0 +1,34 @@ +httpConnection: + protocol: http + host: server address + port: 80 + username: root + password: + allowInsecure: true +sshConnection: + host: server address + port: 22 + username: root + # use either password or key + password: "" + privateKey: | + -----BEGIN RSA PRIVATE KEY----- + ... 
+ -----END RSA PRIVATE KEY----- +zfs: + datasetParentName: tank/k8s/a/vols + detachedSnapshotsDatasetParentName: tank/k8s/a/snaps + datasetEnableQuotas: true + datasetEnableReservation: false + datasetPermissionsMode: "0777" + datasetPermissionsUser: root + datasetPermissionsGroup: wheel +nfs: + shareHost: server address + shareAlldirs: false + shareAllowedHosts: [] + shareAllowedNetworks: [] + shareMaprootUser: root + shareMaprootGroup: wheel + shareMapallUser: "" + shareMapallGroup: "" diff --git a/package-lock.json b/package-lock.json new file mode 100644 index 0000000..a3dab0d --- /dev/null +++ b/package-lock.json @@ -0,0 +1,2397 @@ +{ + "name": "democratic-csi", + "version": "0.1.0", + "lockfileVersion": 1, + "requires": true, + "dependencies": { + "@babel/code-frame": { + "version": "7.5.5", + "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.5.5.tgz", + "integrity": "sha512-27d4lZoomVyo51VegxI20xZPuSHusqbQag/ztrBC7wegWoQ1nLREPVSKSW8byhTlzTKyNE4ifaTA6lCp7JjpFw==", + "requires": { + "@babel/highlight": "^7.0.0" + } + }, + "@babel/highlight": { + "version": "7.5.0", + "resolved": "https://registry.npmjs.org/@babel/highlight/-/highlight-7.5.0.tgz", + "integrity": "sha512-7dV4eu9gBxoM0dAnj/BCFDW9LFU0zvTrkq0ugM7pnHEgguOEeOz1so2ZghEdzviYzQEED0r4EAgpsBChKy1TRQ==", + "requires": { + "chalk": "^2.0.0", + "esutils": "^2.0.2", + "js-tokens": "^4.0.0" + } + }, + "@grpc/proto-loader": { + "version": "0.5.3", + "resolved": "https://registry.npmjs.org/@grpc/proto-loader/-/proto-loader-0.5.3.tgz", + "integrity": "sha512-8qvUtGg77G2ZT2HqdqYoM/OY97gQd/0crSG34xNmZ4ZOsv3aQT/FQV9QfZPazTGna6MIoyUd+u6AxsoZjJ/VMQ==", + "requires": { + "lodash.camelcase": "^4.3.0", + "protobufjs": "^6.8.6" + }, + "dependencies": { + "long": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/long/-/long-4.0.0.tgz", + "integrity": "sha512-XsP+KhQif4bjX1kbuSiySJFNAehNxgLb6hPRGJ9QsUr8ajHkuXGdrHmFUTUUXhDwVX2R5bY4JNZEwbUiMhV+MA==" + }, + "protobufjs": { + "version": 
"6.8.8", + "resolved": "https://registry.npmjs.org/protobufjs/-/protobufjs-6.8.8.tgz", + "integrity": "sha512-AAmHtD5pXgZfi7GMpllpO3q1Xw1OYldr+dMUlAnffGTAhqkg72WdmSY71uKBF/JuyiKs8psYbtKrhi0ASCD8qw==", + "requires": { + "@protobufjs/aspromise": "^1.1.2", + "@protobufjs/base64": "^1.1.2", + "@protobufjs/codegen": "^2.0.4", + "@protobufjs/eventemitter": "^1.1.0", + "@protobufjs/fetch": "^1.1.0", + "@protobufjs/float": "^1.0.2", + "@protobufjs/inquire": "^1.1.0", + "@protobufjs/path": "^1.1.2", + "@protobufjs/pool": "^1.1.0", + "@protobufjs/utf8": "^1.1.0", + "@types/long": "^4.0.0", + "@types/node": "^10.1.0", + "long": "^4.0.0" + } + } + } + }, + "@protobufjs/aspromise": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/@protobufjs/aspromise/-/aspromise-1.1.2.tgz", + "integrity": "sha1-m4sMxmPWaafY9vXQiToU00jzD78=" + }, + "@protobufjs/base64": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/@protobufjs/base64/-/base64-1.1.2.tgz", + "integrity": "sha512-AZkcAA5vnN/v4PDqKyMR5lx7hZttPDgClv83E//FMNhR2TMcLUhfRUBHCmSl0oi9zMgDDqRUJkSxO3wm85+XLg==" + }, + "@protobufjs/codegen": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/@protobufjs/codegen/-/codegen-2.0.4.tgz", + "integrity": "sha512-YyFaikqM5sH0ziFZCN3xDC7zeGaB/d0IUb9CATugHWbd1FRFwWwt4ld4OYMPWu5a3Xe01mGAULCdqhMlPl29Jg==" + }, + "@protobufjs/eventemitter": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@protobufjs/eventemitter/-/eventemitter-1.1.0.tgz", + "integrity": "sha1-NVy8mLr61ZePntCV85diHx0Ga3A=" + }, + "@protobufjs/fetch": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@protobufjs/fetch/-/fetch-1.1.0.tgz", + "integrity": "sha1-upn7WYYUr2VwDBYZ/wbUVLDYTEU=", + "requires": { + "@protobufjs/aspromise": "^1.1.1", + "@protobufjs/inquire": "^1.1.0" + } + }, + "@protobufjs/float": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/@protobufjs/float/-/float-1.0.2.tgz", + "integrity": 
"sha1-Xp4avctz/Ap8uLKR33jIy9l7h9E=" + }, + "@protobufjs/inquire": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@protobufjs/inquire/-/inquire-1.1.0.tgz", + "integrity": "sha1-/yAOPnzyQp4tyvwRQIKOjMY48Ik=" + }, + "@protobufjs/path": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/@protobufjs/path/-/path-1.1.2.tgz", + "integrity": "sha1-bMKyDFya1q0NzP0hynZz2Nf79o0=" + }, + "@protobufjs/pool": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@protobufjs/pool/-/pool-1.1.0.tgz", + "integrity": "sha1-Cf0V8tbTq/qbZbw2ZQbWrXhG/1Q=" + }, + "@protobufjs/utf8": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@protobufjs/utf8/-/utf8-1.1.0.tgz", + "integrity": "sha1-p3c2C1s5oaLlEG+OhY8v0tBgxXA=" + }, + "@types/bytebuffer": { + "version": "5.0.40", + "resolved": "https://registry.npmjs.org/@types/bytebuffer/-/bytebuffer-5.0.40.tgz", + "integrity": "sha512-h48dyzZrPMz25K6Q4+NCwWaxwXany2FhQg/ErOcdZS1ZpsaDnDMZg8JYLMTGz7uvXKrcKGJUZJlZObyfgdaN9g==", + "requires": { + "@types/long": "*", + "@types/node": "*" + } + }, + "@types/color-name": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/@types/color-name/-/color-name-1.1.1.tgz", + "integrity": "sha512-rr+OQyAjxze7GgWrSaJwydHStIhHq2lvY3BOC2Mj7KnzI7XK0Uw1TOOdI9lDoajEbSWLiYgoo4f1R51erQfhPQ==" + }, + "@types/long": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/@types/long/-/long-4.0.0.tgz", + "integrity": "sha512-1w52Nyx4Gq47uuu0EVcsHBxZFJgurQ+rTKS3qMHxR1GY2T8c2AJYd6vZoZ9q1rupaDjU0yT+Jc2XTyXkjeMA+Q==" + }, + "@types/node": { + "version": "10.14.9", + "resolved": "https://registry.npmjs.org/@types/node/-/node-10.14.9.tgz", + "integrity": "sha512-NelG/dSahlXYtSoVPErrp06tYFrvzj8XLWmKA+X8x0W//4MqbUyZu++giUG/v0bjAT6/Qxa8IjodrfdACyb0Fg==" + }, + "acorn": { + "version": "7.1.0", + "resolved": "https://registry.npmjs.org/acorn/-/acorn-7.1.0.tgz", + "integrity": 
"sha512-kL5CuoXA/dgxlBbVrflsflzQ3PAas7RYZB52NOm/6839iVYJgKMJ3cQJD+t2i5+qFa8h3MDpEOJiS64E8JLnSQ==" + }, + "acorn-jsx": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/acorn-jsx/-/acorn-jsx-5.1.0.tgz", + "integrity": "sha512-tMUqwBWfLFbJbizRmEcWSLw6HnFzfdJs2sOJEOwwtVPMoH/0Ay+E703oZz78VSXZiiDcZrQ5XKjPIUQixhmgVw==" + }, + "ajv": { + "version": "6.10.0", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.10.0.tgz", + "integrity": "sha512-nffhOpkymDECQyR0mnsUtoCE8RlX38G0rYP+wgLWFyZuUyuuojSSvi/+euOiQBIn63whYwYVIIH1TvE3tu4OEg==", + "requires": { + "fast-deep-equal": "^2.0.1", + "fast-json-stable-stringify": "^2.0.0", + "json-schema-traverse": "^0.4.1", + "uri-js": "^4.2.2" + } + }, + "ansi-escapes": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/ansi-escapes/-/ansi-escapes-4.3.0.tgz", + "integrity": "sha512-EiYhwo0v255HUL6eDyuLrXEkTi7WwVCLAw+SeOQ7M7qdun1z1pum4DEm/nuqIVbPvi9RPPc9k9LbyBv6H0DwVg==", + "requires": { + "type-fest": "^0.8.1" + } + }, + "ansi-regex": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.0.tgz", + "integrity": "sha512-bY6fj56OUQ0hU1KjFNDQuJFezqKdrAyFdIevADiqrWHwSlbmBNMHp5ak2f40Pm8JTFyM2mqxkG6ngkHO11f/lg==" + }, + "ansi-styles": { + "version": "3.2.1", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz", + "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==", + "requires": { + "color-convert": "^1.9.0" + } + }, + "argparse": { + "version": "1.0.10", + "resolved": "https://registry.npmjs.org/argparse/-/argparse-1.0.10.tgz", + "integrity": "sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg==", + "requires": { + "sprintf-js": "~1.0.2" + } + }, + "ascli": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/ascli/-/ascli-1.0.1.tgz", + "integrity": "sha1-vPpZdKYvGOgcq660lzKrSoj5Brw=", + "requires": { + "colour": "~0.7.1", + "optjs": 
"~3.2.2" + } + }, + "asn1": { + "version": "0.2.4", + "resolved": "https://registry.npmjs.org/asn1/-/asn1-0.2.4.tgz", + "integrity": "sha512-jxwzQpLQjSmWXgwaCZE9Nz+glAG01yF1QnWgbhGwHI5A6FRIEY6IVqtHhIepHqI7/kyEyQEagBC5mBEFlIYvdg==", + "requires": { + "safer-buffer": "~2.1.0" + } + }, + "assert-plus": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/assert-plus/-/assert-plus-1.0.0.tgz", + "integrity": "sha1-8S4PPF13sLHN2RRpQuTpbB5N1SU=" + }, + "astral-regex": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/astral-regex/-/astral-regex-1.0.0.tgz", + "integrity": "sha512-+Ryf6g3BKoRc7jfp7ad8tM4TtMiaWvbF/1/sQcZPkkS7ag3D5nMBCe2UfOTONtAkaG0tO0ij3C5Lwmf1EiyjHg==" + }, + "async": { + "version": "2.6.3", + "resolved": "https://registry.npmjs.org/async/-/async-2.6.3.tgz", + "integrity": "sha512-zflvls11DCy+dQWzTW2dzuilv8Z5X/pjfmZOWba6TNIVDm+2UDaJmXSOXlasHKfNBs8oo3M0aT50fDEWfKZjXg==", + "requires": { + "lodash": "^4.17.14" + } + }, + "asynckit": { + "version": "0.4.0", + "resolved": "https://registry.npmjs.org/asynckit/-/asynckit-0.4.0.tgz", + "integrity": "sha1-x57Zf380y48robyXkLzDZkdLS3k=" + }, + "aws-sign2": { + "version": "0.7.0", + "resolved": "https://registry.npmjs.org/aws-sign2/-/aws-sign2-0.7.0.tgz", + "integrity": "sha1-tG6JCTSpWR8tL2+G1+ap8bP+dqg=" + }, + "aws4": { + "version": "1.8.0", + "resolved": "https://registry.npmjs.org/aws4/-/aws4-1.8.0.tgz", + "integrity": "sha512-ReZxvNHIOv88FlT7rxcXIIC0fPt4KZqZbOlivyWtXLt8ESx84zd3kMC6iK5jVeS2qt+g7ftS7ye4fi06X5rtRQ==" + }, + "balanced-match": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.0.tgz", + "integrity": "sha1-ibTRmasr7kneFk6gK4nORi1xt2c=" + }, + "bcrypt-pbkdf": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/bcrypt-pbkdf/-/bcrypt-pbkdf-1.0.2.tgz", + "integrity": "sha1-pDAdOJtqQ/m2f/PKEaP2Y342Dp4=", + "requires": { + "tweetnacl": "^0.14.3" + } + }, + "brace-expansion": { + "version": "1.1.11", + "resolved": 
"https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", + "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", + "requires": { + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" + } + }, + "bunyan": { + "version": "1.8.12", + "resolved": "https://registry.npmjs.org/bunyan/-/bunyan-1.8.12.tgz", + "integrity": "sha1-8VDw9nSKvdcq6uhPBEA74u8RN5c=", + "requires": { + "dtrace-provider": "~0.8", + "moment": "^2.10.6", + "mv": "~2", + "safe-json-stringify": "~1" + } + }, + "bytebuffer": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/bytebuffer/-/bytebuffer-5.0.1.tgz", + "integrity": "sha1-WC7qSxqHO20CCkjVjfhfC7ps/d0=", + "requires": { + "long": "~3" + } + }, + "callsites": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/callsites/-/callsites-3.1.0.tgz", + "integrity": "sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ==" + }, + "camelcase": { + "version": "5.3.1", + "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-5.3.1.tgz", + "integrity": "sha512-L28STB170nwWS63UjtlEOE3dldQApaJXZkOI1uMFfzf3rRuPegHaHesyee+YxQ+W6SvRDQV6UrdOdRiR153wJg==" + }, + "caseless": { + "version": "0.12.0", + "resolved": "https://registry.npmjs.org/caseless/-/caseless-0.12.0.tgz", + "integrity": "sha1-G2gcIf+EAzyCZUMJBolCDRhxUdw=" + }, + "chalk": { + "version": "2.4.2", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.2.tgz", + "integrity": "sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ==", + "requires": { + "ansi-styles": "^3.2.1", + "escape-string-regexp": "^1.0.5", + "supports-color": "^5.3.0" + } + }, + "chardet": { + "version": "0.7.0", + "resolved": "https://registry.npmjs.org/chardet/-/chardet-0.7.0.tgz", + "integrity": "sha512-mT8iDcrh03qDGRRmoA2hmBJnxpllMR+0/0qlzjqZES6NdiWDcZkCNAk4rPFZ9Q85r27unkiNNg8ZOiwZXBHwcA==" + }, + "cli-cursor": { + "version": "3.1.0", + 
"resolved": "https://registry.npmjs.org/cli-cursor/-/cli-cursor-3.1.0.tgz", + "integrity": "sha512-I/zHAwsKf9FqGoXM4WWRACob9+SNukZTd94DWF57E4toouRulbCxcUh6RKUEOQlYTHJnzkPMySvPNaaSLNfLZw==", + "requires": { + "restore-cursor": "^3.1.0" + } + }, + "cli-width": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/cli-width/-/cli-width-2.2.0.tgz", + "integrity": "sha1-/xnt6Kml5XkyQUewwR8PvLq+1jk=" + }, + "cliui": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/cliui/-/cliui-6.0.0.tgz", + "integrity": "sha512-t6wbgtoCXvAzst7QgXxJYqPt0usEfbgQdftEPbLL/cvv6HPE5VgvqCuAIDR0NgU52ds6rFwqrgakNLrHEjCbrQ==", + "requires": { + "string-width": "^4.2.0", + "strip-ansi": "^6.0.0", + "wrap-ansi": "^6.2.0" + } + }, + "code-point-at": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/code-point-at/-/code-point-at-1.1.0.tgz", + "integrity": "sha1-DQcLTQQ6W+ozovGkDi7bPZpMz3c=" + }, + "color": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/color/-/color-3.0.0.tgz", + "integrity": "sha512-jCpd5+s0s0t7p3pHQKpnJ0TpQKKdleP71LWcA0aqiljpiuAkOSUFN/dyH8ZwF0hRmFlrIuRhufds1QyEP9EB+w==", + "requires": { + "color-convert": "^1.9.1", + "color-string": "^1.5.2" + } + }, + "color-convert": { + "version": "1.9.3", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-1.9.3.tgz", + "integrity": "sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==", + "requires": { + "color-name": "1.1.3" + } + }, + "color-name": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz", + "integrity": "sha1-p9BVi9icQveV3UIyj3QIMcpTvCU=" + }, + "color-string": { + "version": "1.5.3", + "resolved": "https://registry.npmjs.org/color-string/-/color-string-1.5.3.tgz", + "integrity": "sha512-dC2C5qeWoYkxki5UAXapdjqO672AM4vZuPGRQfO8b5HKuKGBbKWpITyDYN7TOFKvRW7kOgAn3746clDBMDJyQw==", + "requires": { + "color-name": "^1.0.0", + "simple-swizzle": "^0.2.2" + } + }, + 
"colornames": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/colornames/-/colornames-1.1.1.tgz", + "integrity": "sha1-+IiQMGhcfE/54qVZ9Qd+t2qBb5Y=" + }, + "colors": { + "version": "1.3.3", + "resolved": "https://registry.npmjs.org/colors/-/colors-1.3.3.tgz", + "integrity": "sha512-mmGt/1pZqYRjMxB1axhTo16/snVZ5krrKkcmMeVKxzECMMXoCgnvTPp10QgHfcbQZw8Dq2jMNG6je4JlWU0gWg==" + }, + "colorspace": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/colorspace/-/colorspace-1.1.2.tgz", + "integrity": "sha512-vt+OoIP2d76xLhjwbBaucYlNSpPsrJWPlBTtwCpQKIu6/CSMutyzX93O/Do0qzpH3YoHEes8YEFXyZ797rEhzQ==", + "requires": { + "color": "3.0.x", + "text-hex": "1.0.x" + } + }, + "colour": { + "version": "0.7.1", + "resolved": "https://registry.npmjs.org/colour/-/colour-0.7.1.tgz", + "integrity": "sha1-nLFpkX7F0SwHNtPoaFdG3xyt93g=" + }, + "combined-stream": { + "version": "1.0.8", + "resolved": "https://registry.npmjs.org/combined-stream/-/combined-stream-1.0.8.tgz", + "integrity": "sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg==", + "requires": { + "delayed-stream": "~1.0.0" + } + }, + "concat-map": { + "version": "0.0.1", + "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz", + "integrity": "sha1-2Klr13/Wjfd5OnMDajug1UBdR3s=" + }, + "core-util-is": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/core-util-is/-/core-util-is-1.0.2.tgz", + "integrity": "sha1-tf1UIgqivFq1eqtxQMlAdUUDwac=" + }, + "cross-spawn": { + "version": "6.0.5", + "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-6.0.5.tgz", + "integrity": "sha512-eTVLrBSt7fjbDygz805pMnstIs2VTBNkRm0qxZd+M7A5XDdxVRWO5MxGBXZhjY4cqLYLdtrGqRf8mBPmzwSpWQ==", + "requires": { + "nice-try": "^1.0.4", + "path-key": "^2.0.1", + "semver": "^5.5.0", + "shebang-command": "^1.2.0", + "which": "^1.2.9" + }, + "dependencies": { + "semver": { + "version": "5.7.1", + "resolved": 
"https://registry.npmjs.org/semver/-/semver-5.7.1.tgz", + "integrity": "sha512-sauaDf/PZdVgrLTNYHRtpXa1iRiKcaebiKQ1BJdpQlWH2lCvexQdX55snPFyK7QzpudqbCI0qXFfOasHdyNDGQ==" + } + } + }, + "dashdash": { + "version": "1.14.1", + "resolved": "https://registry.npmjs.org/dashdash/-/dashdash-1.14.1.tgz", + "integrity": "sha1-hTz6D3y+L+1d4gMmuN1YEDX24vA=", + "requires": { + "assert-plus": "^1.0.0" + } + }, + "debug": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.1.1.tgz", + "integrity": "sha512-pYAIzeRo8J6KPEaJ0VWOh5Pzkbw/RetuzehGM7QRRX5he4fPHx2rdKMB256ehJCkX+XRQm16eZLqLNS8RSZXZw==", + "requires": { + "ms": "^2.1.1" + } + }, + "decamelize": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/decamelize/-/decamelize-1.2.0.tgz", + "integrity": "sha1-9lNNFRSCabIDUue+4m9QH5oZEpA=" + }, + "deep-is": { + "version": "0.1.3", + "resolved": "https://registry.npmjs.org/deep-is/-/deep-is-0.1.3.tgz", + "integrity": "sha1-s2nW+128E+7PUk+RsHD+7cNXzzQ=" + }, + "delayed-stream": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/delayed-stream/-/delayed-stream-1.0.0.tgz", + "integrity": "sha1-3zrhmayt+31ECqrgsp4icrJOxhk=" + }, + "diagnostics": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/diagnostics/-/diagnostics-1.1.1.tgz", + "integrity": "sha512-8wn1PmdunLJ9Tqbx+Fx/ZEuHfJf4NKSN2ZBj7SJC/OWRWha843+WsTjqMe1B5E3p28jqBlp+mJ2fPVxPyNgYKQ==", + "requires": { + "colorspace": "1.1.x", + "enabled": "1.0.x", + "kuler": "1.0.x" + } + }, + "doctrine": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/doctrine/-/doctrine-3.0.0.tgz", + "integrity": "sha512-yS+Q5i3hBf7GBkd4KG8a7eBNNWNGLTaEwwYWUijIYM7zrlYDM0BFXHjjPWlWZ1Rg7UaddZeIDmi9jF3HmqiQ2w==", + "requires": { + "esutils": "^2.0.2" + } + }, + "dtrace-provider": { + "version": "0.8.8", + "resolved": "https://registry.npmjs.org/dtrace-provider/-/dtrace-provider-0.8.8.tgz", + "integrity": 
"sha512-b7Z7cNtHPhH9EJhNNbbeqTcXB8LGFFZhq1PGgEvpeHlzd36bhbdTWoE/Ba/YguqpBSlAPKnARWhVlhunCMwfxg==", + "optional": true, + "requires": { + "nan": "^2.14.0" + } + }, + "ecc-jsbn": { + "version": "0.1.2", + "resolved": "https://registry.npmjs.org/ecc-jsbn/-/ecc-jsbn-0.1.2.tgz", + "integrity": "sha1-OoOpBOVDUyh4dMVkt1SThoSamMk=", + "requires": { + "jsbn": "~0.1.0", + "safer-buffer": "^2.1.0" + } + }, + "emoji-regex": { + "version": "7.0.3", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-7.0.3.tgz", + "integrity": "sha512-CwBLREIQ7LvYFB0WyRvwhq5N5qPhc6PMjD6bYggFlI5YyDgl+0vxq5VHbMOFqLg7hfWzmu8T5Z1QofhmTIhItA==" + }, + "enabled": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/enabled/-/enabled-1.0.2.tgz", + "integrity": "sha1-ll9lE9LC0cX0ZStkouM5ZGf8L5M=", + "requires": { + "env-variable": "0.0.x" + } + }, + "env-variable": { + "version": "0.0.5", + "resolved": "https://registry.npmjs.org/env-variable/-/env-variable-0.0.5.tgz", + "integrity": "sha512-zoB603vQReOFvTg5xMl9I1P2PnHsHQQKTEowsKKD7nseUfJq6UWzK+4YtlWUO1nhiQUxe6XMkk+JleSZD1NZFA==" + }, + "escape-string-regexp": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz", + "integrity": "sha1-G2HAViGQqN/2rjuyzwIAyhMLhtQ=" + }, + "eslint": { + "version": "6.6.0", + "resolved": "https://registry.npmjs.org/eslint/-/eslint-6.6.0.tgz", + "integrity": "sha512-PpEBq7b6qY/qrOmpYQ/jTMDYfuQMELR4g4WI1M/NaSDDD/bdcMb+dj4Hgks7p41kW2caXsPsEZAEAyAgjVVC0g==", + "requires": { + "@babel/code-frame": "^7.0.0", + "ajv": "^6.10.0", + "chalk": "^2.1.0", + "cross-spawn": "^6.0.5", + "debug": "^4.0.1", + "doctrine": "^3.0.0", + "eslint-scope": "^5.0.0", + "eslint-utils": "^1.4.3", + "eslint-visitor-keys": "^1.1.0", + "espree": "^6.1.2", + "esquery": "^1.0.1", + "esutils": "^2.0.2", + "file-entry-cache": "^5.0.1", + "functional-red-black-tree": "^1.0.1", + "glob-parent": "^5.0.0", + "globals": "^11.7.0", + "ignore": "^4.0.6", + 
"import-fresh": "^3.0.0", + "imurmurhash": "^0.1.4", + "inquirer": "^7.0.0", + "is-glob": "^4.0.0", + "js-yaml": "^3.13.1", + "json-stable-stringify-without-jsonify": "^1.0.1", + "levn": "^0.3.0", + "lodash": "^4.17.14", + "minimatch": "^3.0.4", + "mkdirp": "^0.5.1", + "natural-compare": "^1.4.0", + "optionator": "^0.8.2", + "progress": "^2.0.0", + "regexpp": "^2.0.1", + "semver": "^6.1.2", + "strip-ansi": "^5.2.0", + "strip-json-comments": "^3.0.1", + "table": "^5.2.3", + "text-table": "^0.2.0", + "v8-compile-cache": "^2.0.3" + }, + "dependencies": { + "ansi-regex": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-4.1.0.tgz", + "integrity": "sha512-1apePfXM1UOSqw0o9IiFAovVz9M5S1Dg+4TrDwfMewQ6p/rmMueb7tWZjQ1rx4Loy1ArBggoqGpfqqdI4rondg==" + }, + "strip-ansi": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-5.2.0.tgz", + "integrity": "sha512-DuRs1gKbBqsMKIZlrffwlug8MHkcnpjs5VPmL1PAh+mA30U0DTotfDZ0d2UUsXpPmPmMMJ6W773MaA3J+lbiWA==", + "requires": { + "ansi-regex": "^4.1.0" + } + } + } + }, + "eslint-scope": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-5.0.0.tgz", + "integrity": "sha512-oYrhJW7S0bxAFDvWqzvMPRm6pcgcnWc4QnofCAqRTRfQC0JcwenzGglTtsLyIuuWFfkqDG9vz67cnttSd53djw==", + "requires": { + "esrecurse": "^4.1.0", + "estraverse": "^4.1.1" + } + }, + "eslint-utils": { + "version": "1.4.3", + "resolved": "https://registry.npmjs.org/eslint-utils/-/eslint-utils-1.4.3.tgz", + "integrity": "sha512-fbBN5W2xdY45KulGXmLHZ3c3FHfVYmKg0IrAKGOkT/464PQsx2UeIzfz1RmEci+KLm1bBaAzZAh8+/E+XAeZ8Q==", + "requires": { + "eslint-visitor-keys": "^1.1.0" + } + }, + "eslint-visitor-keys": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-1.1.0.tgz", + "integrity": "sha512-8y9YjtM1JBJU/A9Kc+SbaOV4y29sSWckBwMHa+FGtVj5gN/sbnKDf6xJUl+8g7FAij9LVaP8C24DUiH/f/2Z9A==" + }, + "espree": { + "version": "6.1.2", + 
"resolved": "https://registry.npmjs.org/espree/-/espree-6.1.2.tgz", + "integrity": "sha512-2iUPuuPP+yW1PZaMSDM9eyVf8D5P0Hi8h83YtZ5bPc/zHYjII5khoixIUTMO794NOY8F/ThF1Bo8ncZILarUTA==", + "requires": { + "acorn": "^7.1.0", + "acorn-jsx": "^5.1.0", + "eslint-visitor-keys": "^1.1.0" + } + }, + "esprima": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/esprima/-/esprima-4.0.1.tgz", + "integrity": "sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A==" + }, + "esquery": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/esquery/-/esquery-1.0.1.tgz", + "integrity": "sha512-SmiyZ5zIWH9VM+SRUReLS5Q8a7GxtRdxEBVZpm98rJM7Sb+A9DVCndXfkeFUd3byderg+EbDkfnevfCwynWaNA==", + "requires": { + "estraverse": "^4.0.0" + } + }, + "esrecurse": { + "version": "4.2.1", + "resolved": "https://registry.npmjs.org/esrecurse/-/esrecurse-4.2.1.tgz", + "integrity": "sha512-64RBB++fIOAXPw3P9cy89qfMlvZEXZkqqJkjqqXIvzP5ezRZjW+lPWjw35UX/3EhUPFYbg5ER4JYgDw4007/DQ==", + "requires": { + "estraverse": "^4.1.0" + } + }, + "estraverse": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-4.3.0.tgz", + "integrity": "sha512-39nnKffWz8xN1BU/2c79n9nB9HDzo0niYUqx6xyqUnyoAnQyyWpOTdZEeiCch8BBu515t4wp9ZmgVfVhn9EBpw==" + }, + "esutils": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/esutils/-/esutils-2.0.3.tgz", + "integrity": "sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g==" + }, + "extend": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/extend/-/extend-3.0.2.tgz", + "integrity": "sha512-fjquC59cD7CyW6urNXK0FBufkZcoiGG80wTuPujX590cB5Ttln20E2UB4S/WARVqhXffZl2LNgS+gQdPIIim/g==" + }, + "external-editor": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/external-editor/-/external-editor-3.1.0.tgz", + "integrity": "sha512-hMQ4CX1p1izmuLYyZqLMO/qGNw10wSv9QDCPfzXfyFrOaCSSoRfqE1Kf1s5an66J5JZC62NewG+mK49jOCtQew==", 
+ "requires": { + "chardet": "^0.7.0", + "iconv-lite": "^0.4.24", + "tmp": "^0.0.33" + } + }, + "extsprintf": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/extsprintf/-/extsprintf-1.3.0.tgz", + "integrity": "sha1-lpGEQOMEGnpBT4xS48V06zw+HgU=" + }, + "fast-deep-equal": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-2.0.1.tgz", + "integrity": "sha1-ewUhjd+WZ79/Nwv3/bLLFf3Qqkk=" + }, + "fast-json-stable-stringify": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/fast-json-stable-stringify/-/fast-json-stable-stringify-2.0.0.tgz", + "integrity": "sha1-1RQsDK7msRifh9OnYREGT4bIu/I=" + }, + "fast-levenshtein": { + "version": "2.0.6", + "resolved": "https://registry.npmjs.org/fast-levenshtein/-/fast-levenshtein-2.0.6.tgz", + "integrity": "sha1-PYpcZog6FqMMqGQ+hR8Zuqd5eRc=" + }, + "fast-safe-stringify": { + "version": "2.0.6", + "resolved": "https://registry.npmjs.org/fast-safe-stringify/-/fast-safe-stringify-2.0.6.tgz", + "integrity": "sha512-q8BZ89jjc+mz08rSxROs8VsrBBcn1SIw1kq9NjolL509tkABRk9io01RAjSaEv1Xb2uFLt8VtRiZbGp5H8iDtg==" + }, + "fecha": { + "version": "2.3.3", + "resolved": "https://registry.npmjs.org/fecha/-/fecha-2.3.3.tgz", + "integrity": "sha512-lUGBnIamTAwk4znq5BcqsDaxSmZ9nDVJaij6NvRt/Tg4R69gERA+otPKbS86ROw9nxVMw2/mp1fnaiWqbs6Sdg==" + }, + "figures": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/figures/-/figures-3.1.0.tgz", + "integrity": "sha512-ravh8VRXqHuMvZt/d8GblBeqDMkdJMBdv/2KntFH+ra5MXkO7nxNKpzQ3n6QD/2da1kH0aWmNISdvhM7gl2gVg==", + "requires": { + "escape-string-regexp": "^1.0.5" + } + }, + "file-entry-cache": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/file-entry-cache/-/file-entry-cache-5.0.1.tgz", + "integrity": "sha512-bCg29ictuBaKUwwArK4ouCaqDgLZcysCFLmM/Yn/FDoqndh/9vNuQfXRDvTuXKLxfD/JtZQGKFT8MGcJBK644g==", + "requires": { + "flat-cache": "^2.0.1" + } + }, + "find-up": { + "version": "4.1.0", + "resolved": 
"https://registry.npmjs.org/find-up/-/find-up-4.1.0.tgz", + "integrity": "sha512-PpOwAdQ/YlXQ2vj8a3h8IipDuYRi3wceVQQGYWxNINccq40Anw7BlsEXCMbt1Zt+OLA6Fq9suIpIWD0OsnISlw==", + "requires": { + "locate-path": "^5.0.0", + "path-exists": "^4.0.0" + } + }, + "flat-cache": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/flat-cache/-/flat-cache-2.0.1.tgz", + "integrity": "sha512-LoQe6yDuUMDzQAEH8sgmh4Md6oZnc/7PjtwjNFSzveXqSHt6ka9fPBuso7IGf9Rz4uqnSnWiFH2B/zj24a5ReA==", + "requires": { + "flatted": "^2.0.0", + "rimraf": "2.6.3", + "write": "1.0.3" + } + }, + "flatted": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/flatted/-/flatted-2.0.1.tgz", + "integrity": "sha512-a1hQMktqW9Nmqr5aktAux3JMNqaucxGcjtjWnZLHX7yyPCmlSV3M54nGYbqT8K+0GhF3NBgmJCc3ma+WOgX8Jg==" + }, + "forever-agent": { + "version": "0.6.1", + "resolved": "https://registry.npmjs.org/forever-agent/-/forever-agent-0.6.1.tgz", + "integrity": "sha1-+8cfDEGt6zf5bFd60e1C2P2sypE=" + }, + "form-data": { + "version": "2.3.3", + "resolved": "https://registry.npmjs.org/form-data/-/form-data-2.3.3.tgz", + "integrity": "sha512-1lLKB2Mu3aGP1Q/2eCOx0fNbRMe7XdwktwOruhfqqd0rIJWwN4Dh+E3hrPSlDCXnSR7UtZ1N38rVXm+6+MEhJQ==", + "requires": { + "asynckit": "^0.4.0", + "combined-stream": "^1.0.6", + "mime-types": "^2.1.12" + } + }, + "fs.realpath": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz", + "integrity": "sha1-FQStJSMVjKpA20onh8sBQRmU6k8=" + }, + "functional-red-black-tree": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/functional-red-black-tree/-/functional-red-black-tree-1.0.1.tgz", + "integrity": "sha1-GwqzvVU7Kg1jmdKcDj6gslIHgyc=" + }, + "get-caller-file": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/get-caller-file/-/get-caller-file-2.0.5.tgz", + "integrity": "sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg==" + }, + "getpass": { + "version": "0.1.7", 
+ "resolved": "https://registry.npmjs.org/getpass/-/getpass-0.1.7.tgz", + "integrity": "sha1-Xv+OPmhNVprkyysSgmBOi6YhSfo=", + "requires": { + "assert-plus": "^1.0.0" + } + }, + "glob": { + "version": "7.1.4", + "resolved": "https://registry.npmjs.org/glob/-/glob-7.1.4.tgz", + "integrity": "sha512-hkLPepehmnKk41pUGm3sYxoFs/umurYfYJCerbXEyFIWcAzvpipAgVkBqqT9RBKMGjnq6kMuyYwha6csxbiM1A==", + "requires": { + "fs.realpath": "^1.0.0", + "inflight": "^1.0.4", + "inherits": "2", + "minimatch": "^3.0.4", + "once": "^1.3.0", + "path-is-absolute": "^1.0.0" + } + }, + "glob-parent": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.0.tgz", + "integrity": "sha512-qjtRgnIVmOfnKUE3NJAQEdk+lKrxfw8t5ke7SXtfMTHcjsBfOfWXCQfdb30zfDoZQ2IRSIiidmjtbHZPZ++Ihw==", + "requires": { + "is-glob": "^4.0.1" + } + }, + "globals": { + "version": "11.12.0", + "resolved": "https://registry.npmjs.org/globals/-/globals-11.12.0.tgz", + "integrity": "sha512-WOBp/EEGUiIsJSp7wcv/y6MO+lV9UoncWqxuFfm8eBwzWNgyfBd6Gz+IeKQ9jCmyhoH99g15M3T+QaVHFjizVA==" + }, + "grpc": { + "version": "1.24.2", + "resolved": "https://registry.npmjs.org/grpc/-/grpc-1.24.2.tgz", + "integrity": "sha512-EG3WH6AWMVvAiV15d+lr+K77HJ/KV/3FvMpjKjulXHbTwgDZkhkcWbwhxFAoTdxTkQvy0WFcO3Nog50QBbHZWw==", + "requires": { + "@types/bytebuffer": "^5.0.40", + "lodash.camelcase": "^4.3.0", + "lodash.clone": "^4.5.0", + "nan": "^2.13.2", + "node-pre-gyp": "^0.14.0", + "protobufjs": "^5.0.3" + }, + "dependencies": { + "abbrev": { + "version": "1.1.1", + "bundled": true + }, + "ansi-regex": { + "version": "2.1.1", + "bundled": true + }, + "aproba": { + "version": "1.2.0", + "bundled": true + }, + "are-we-there-yet": { + "version": "1.1.5", + "bundled": true, + "requires": { + "delegates": "^1.0.0", + "readable-stream": "^2.0.6" + } + }, + "balanced-match": { + "version": "1.0.0", + "bundled": true + }, + "brace-expansion": { + "version": "1.1.11", + "bundled": true, + "requires": { + "balanced-match": 
"^1.0.0", + "concat-map": "0.0.1" + } + }, + "chownr": { + "version": "1.1.3", + "bundled": true + }, + "code-point-at": { + "version": "1.1.0", + "bundled": true + }, + "concat-map": { + "version": "0.0.1", + "bundled": true + }, + "console-control-strings": { + "version": "1.1.0", + "bundled": true + }, + "core-util-is": { + "version": "1.0.2", + "bundled": true + }, + "debug": { + "version": "3.2.6", + "bundled": true, + "requires": { + "ms": "^2.1.1" + } + }, + "deep-extend": { + "version": "0.6.0", + "bundled": true + }, + "delegates": { + "version": "1.0.0", + "bundled": true + }, + "detect-libc": { + "version": "1.0.3", + "bundled": true + }, + "fs-minipass": { + "version": "1.2.7", + "bundled": true, + "requires": { + "minipass": "^2.6.0" + } + }, + "fs.realpath": { + "version": "1.0.0", + "bundled": true + }, + "gauge": { + "version": "2.7.4", + "bundled": true, + "requires": { + "aproba": "^1.0.3", + "console-control-strings": "^1.0.0", + "has-unicode": "^2.0.0", + "object-assign": "^4.1.0", + "signal-exit": "^3.0.0", + "string-width": "^1.0.1", + "strip-ansi": "^3.0.1", + "wide-align": "^1.1.0" + } + }, + "glob": { + "version": "7.1.4", + "bundled": true, + "requires": { + "fs.realpath": "^1.0.0", + "inflight": "^1.0.4", + "inherits": "2", + "minimatch": "^3.0.4", + "once": "^1.3.0", + "path-is-absolute": "^1.0.0" + } + }, + "has-unicode": { + "version": "2.0.1", + "bundled": true + }, + "iconv-lite": { + "version": "0.4.24", + "bundled": true, + "requires": { + "safer-buffer": ">= 2.1.2 < 3" + } + }, + "ignore-walk": { + "version": "3.0.3", + "bundled": true, + "requires": { + "minimatch": "^3.0.4" + } + }, + "inflight": { + "version": "1.0.6", + "bundled": true, + "requires": { + "once": "^1.3.0", + "wrappy": "1" + } + }, + "inherits": { + "version": "2.0.4", + "bundled": true + }, + "ini": { + "version": "1.3.5", + "bundled": true + }, + "is-fullwidth-code-point": { + "version": "1.0.0", + "bundled": true, + "requires": { + "number-is-nan": "^1.0.0" + 
} + }, + "isarray": { + "version": "1.0.0", + "bundled": true + }, + "minimatch": { + "version": "3.0.4", + "bundled": true, + "requires": { + "brace-expansion": "^1.1.7" + } + }, + "minimist": { + "version": "1.2.0", + "bundled": true + }, + "minipass": { + "version": "2.9.0", + "bundled": true, + "requires": { + "safe-buffer": "^5.1.2", + "yallist": "^3.0.0" + } + }, + "minizlib": { + "version": "1.3.3", + "bundled": true, + "requires": { + "minipass": "^2.9.0" + } + }, + "mkdirp": { + "version": "0.5.1", + "bundled": true, + "requires": { + "minimist": "0.0.8" + }, + "dependencies": { + "minimist": { + "version": "0.0.8", + "bundled": true + } + } + }, + "ms": { + "version": "2.1.2", + "bundled": true + }, + "needle": { + "version": "2.4.0", + "bundled": true, + "requires": { + "debug": "^3.2.6", + "iconv-lite": "^0.4.4", + "sax": "^1.2.4" + } + }, + "node-pre-gyp": { + "version": "0.14.0", + "bundled": true, + "requires": { + "detect-libc": "^1.0.2", + "mkdirp": "^0.5.1", + "needle": "^2.2.1", + "nopt": "^4.0.1", + "npm-packlist": "^1.1.6", + "npmlog": "^4.0.2", + "rc": "^1.2.7", + "rimraf": "^2.6.1", + "semver": "^5.3.0", + "tar": "^4.4.2" + } + }, + "nopt": { + "version": "4.0.1", + "bundled": true, + "requires": { + "abbrev": "1", + "osenv": "^0.1.4" + } + }, + "npm-bundled": { + "version": "1.0.6", + "bundled": true + }, + "npm-packlist": { + "version": "1.4.6", + "bundled": true, + "requires": { + "ignore-walk": "^3.0.1", + "npm-bundled": "^1.0.1" + } + }, + "npmlog": { + "version": "4.1.2", + "bundled": true, + "requires": { + "are-we-there-yet": "~1.1.2", + "console-control-strings": "~1.1.0", + "gauge": "~2.7.3", + "set-blocking": "~2.0.0" + } + }, + "number-is-nan": { + "version": "1.0.1", + "bundled": true + }, + "object-assign": { + "version": "4.1.1", + "bundled": true + }, + "once": { + "version": "1.4.0", + "bundled": true, + "requires": { + "wrappy": "1" + } + }, + "os-homedir": { + "version": "1.0.2", + "bundled": true + }, + "os-tmpdir": { + 
"version": "1.0.2", + "bundled": true + }, + "osenv": { + "version": "0.1.5", + "bundled": true, + "requires": { + "os-homedir": "^1.0.0", + "os-tmpdir": "^1.0.0" + } + }, + "path-is-absolute": { + "version": "1.0.1", + "bundled": true + }, + "process-nextick-args": { + "version": "2.0.1", + "bundled": true + }, + "rc": { + "version": "1.2.8", + "bundled": true, + "requires": { + "deep-extend": "^0.6.0", + "ini": "~1.3.0", + "minimist": "^1.2.0", + "strip-json-comments": "~2.0.1" + } + }, + "readable-stream": { + "version": "2.3.6", + "bundled": true, + "requires": { + "core-util-is": "~1.0.0", + "inherits": "~2.0.3", + "isarray": "~1.0.0", + "process-nextick-args": "~2.0.0", + "safe-buffer": "~5.1.1", + "string_decoder": "~1.1.1", + "util-deprecate": "~1.0.1" + } + }, + "rimraf": { + "version": "2.7.1", + "bundled": true, + "requires": { + "glob": "^7.1.3" + } + }, + "safe-buffer": { + "version": "5.1.2", + "bundled": true + }, + "safer-buffer": { + "version": "2.1.2", + "bundled": true + }, + "sax": { + "version": "1.2.4", + "bundled": true + }, + "semver": { + "version": "5.7.1", + "bundled": true + }, + "set-blocking": { + "version": "2.0.0", + "bundled": true + }, + "signal-exit": { + "version": "3.0.2", + "bundled": true + }, + "string-width": { + "version": "1.0.2", + "bundled": true, + "requires": { + "code-point-at": "^1.0.0", + "is-fullwidth-code-point": "^1.0.0", + "strip-ansi": "^3.0.0" + } + }, + "string_decoder": { + "version": "1.1.1", + "bundled": true, + "requires": { + "safe-buffer": "~5.1.0" + } + }, + "strip-ansi": { + "version": "3.0.1", + "bundled": true, + "requires": { + "ansi-regex": "^2.0.0" + } + }, + "strip-json-comments": { + "version": "2.0.1", + "bundled": true + }, + "tar": { + "version": "4.4.13", + "bundled": true, + "requires": { + "chownr": "^1.1.1", + "fs-minipass": "^1.2.5", + "minipass": "^2.8.6", + "minizlib": "^1.2.1", + "mkdirp": "^0.5.0", + "safe-buffer": "^5.1.2", + "yallist": "^3.0.3" + } + }, + "util-deprecate": { + 
"version": "1.0.2", + "bundled": true + }, + "wide-align": { + "version": "1.1.3", + "bundled": true, + "requires": { + "string-width": "^1.0.2 || 2" + } + }, + "wrappy": { + "version": "1.0.2", + "bundled": true + }, + "yallist": { + "version": "3.1.1", + "bundled": true + } + } + }, + "har-schema": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/har-schema/-/har-schema-2.0.0.tgz", + "integrity": "sha1-qUwiJOvKwEeCoNkDVSHyRzW37JI=" + }, + "har-validator": { + "version": "5.1.3", + "resolved": "https://registry.npmjs.org/har-validator/-/har-validator-5.1.3.tgz", + "integrity": "sha512-sNvOCzEQNr/qrvJgc3UG/kD4QtlHycrzwS+6mfTrrSq97BvaYcPZZI1ZSqGSPR73Cxn4LKTD4PttRwfU7jWq5g==", + "requires": { + "ajv": "^6.5.5", + "har-schema": "^2.0.0" + } + }, + "has-flag": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz", + "integrity": "sha1-tdRU3CGZriJWmfNGfloH87lVuv0=" + }, + "http-signature": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/http-signature/-/http-signature-1.2.0.tgz", + "integrity": "sha1-muzZJRFHcvPZW2WmCruPfBj7rOE=", + "requires": { + "assert-plus": "^1.0.0", + "jsprim": "^1.2.2", + "sshpk": "^1.7.0" + } + }, + "iconv-lite": { + "version": "0.4.24", + "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.4.24.tgz", + "integrity": "sha512-v3MXnZAcvnywkTUEZomIActle7RXXeedOR31wwl7VlyoXO4Qi9arvSenNQWne1TcRwhCL1HwLI21bEqdpj8/rA==", + "requires": { + "safer-buffer": ">= 2.1.2 < 3" + } + }, + "ignore": { + "version": "4.0.6", + "resolved": "https://registry.npmjs.org/ignore/-/ignore-4.0.6.tgz", + "integrity": "sha512-cyFDKrqc/YdcWFniJhzI42+AzS+gNwmUzOSFcRCQYwySuBBBy/KjuxWLZ/FHEH6Moq1NizMOBWyTcv8O4OZIMg==" + }, + "import-fresh": { + "version": "3.2.1", + "resolved": "https://registry.npmjs.org/import-fresh/-/import-fresh-3.2.1.tgz", + "integrity": "sha512-6e1q1cnWP2RXD9/keSkxHScg508CdXqXWgWBaETNhyuBFz+kUZlKboh+ISK+bU++DmbHimVBrOz/zzPe0sZ3sQ==", + "requires": { + 
"parent-module": "^1.0.0", + "resolve-from": "^4.0.0" + } + }, + "imurmurhash": { + "version": "0.1.4", + "resolved": "https://registry.npmjs.org/imurmurhash/-/imurmurhash-0.1.4.tgz", + "integrity": "sha1-khi5srkoojixPcT7a21XbyMUU+o=" + }, + "inflight": { + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz", + "integrity": "sha1-Sb1jMdfQLQwJvJEKEHW6gWW1bfk=", + "requires": { + "once": "^1.3.0", + "wrappy": "1" + } + }, + "inherits": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", + "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==" + }, + "inquirer": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/inquirer/-/inquirer-7.0.0.tgz", + "integrity": "sha512-rSdC7zelHdRQFkWnhsMu2+2SO41mpv2oF2zy4tMhmiLWkcKbOAs87fWAJhVXttKVwhdZvymvnuM95EyEXg2/tQ==", + "requires": { + "ansi-escapes": "^4.2.1", + "chalk": "^2.4.2", + "cli-cursor": "^3.1.0", + "cli-width": "^2.0.0", + "external-editor": "^3.0.3", + "figures": "^3.0.0", + "lodash": "^4.17.15", + "mute-stream": "0.0.8", + "run-async": "^2.2.0", + "rxjs": "^6.4.0", + "string-width": "^4.1.0", + "strip-ansi": "^5.1.0", + "through": "^2.3.6" + }, + "dependencies": { + "ansi-regex": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-4.1.0.tgz", + "integrity": "sha512-1apePfXM1UOSqw0o9IiFAovVz9M5S1Dg+4TrDwfMewQ6p/rmMueb7tWZjQ1rx4Loy1ArBggoqGpfqqdI4rondg==" + }, + "strip-ansi": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-5.2.0.tgz", + "integrity": "sha512-DuRs1gKbBqsMKIZlrffwlug8MHkcnpjs5VPmL1PAh+mA30U0DTotfDZ0d2UUsXpPmPmMMJ6W773MaA3J+lbiWA==", + "requires": { + "ansi-regex": "^4.1.0" + } + } + } + }, + "invert-kv": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/invert-kv/-/invert-kv-1.0.0.tgz", + "integrity": "sha1-EEqOSqym09jNFXqO+L+rLXo//bY=" + }, + 
"is-arrayish": { + "version": "0.3.2", + "resolved": "https://registry.npmjs.org/is-arrayish/-/is-arrayish-0.3.2.tgz", + "integrity": "sha512-eVRqCvVlZbuw3GrM63ovNSNAeA1K16kaR/LRY/92w0zxQ5/1YzwblUX652i4Xs9RwAGjW9d9y6X88t8OaAJfWQ==" + }, + "is-extglob": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz", + "integrity": "sha1-qIwCU1eR8C7TfHahueqXc8gz+MI=" + }, + "is-fullwidth-code-point": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", + "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==" + }, + "is-glob": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.1.tgz", + "integrity": "sha512-5G0tKtBTFImOqDnLB2hG6Bp2qcKEFduo4tZu9MT/H6NQv/ghhy30o55ufafxJ/LdH79LLs2Kfrn85TLKyA7BUg==", + "requires": { + "is-extglob": "^2.1.1" + } + }, + "is-promise": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/is-promise/-/is-promise-2.1.0.tgz", + "integrity": "sha1-eaKp7OfwlugPNtKy87wWwf9L8/o=" + }, + "is-stream": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-1.1.0.tgz", + "integrity": "sha1-EtSj3U5o4Lec6428hBc66A2RykQ=" + }, + "is-typedarray": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/is-typedarray/-/is-typedarray-1.0.0.tgz", + "integrity": "sha1-5HnICFjfDBsR3dppQPlgEfzaSpo=" + }, + "isarray": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/isarray/-/isarray-1.0.0.tgz", + "integrity": "sha1-u5NdSFgsuhaMBoNJV6VKPgcSTxE=" + }, + "isexe": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz", + "integrity": "sha1-6PvzdNxVb/iUehDcsFctYz8s+hA=" + }, + "isstream": { + "version": "0.1.2", + "resolved": "https://registry.npmjs.org/isstream/-/isstream-0.1.2.tgz", + "integrity": "sha1-R+Y/evVa+m+S4VAOaQ64uFKcCZo=" + }, + "js-tokens": { + 
"version": "4.0.0", + "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz", + "integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==" + }, + "js-yaml": { + "version": "3.13.1", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.13.1.tgz", + "integrity": "sha512-YfbcO7jXDdyj0DGxYVSlSeQNHbD7XPWvrVWeVUujrQEoZzWJIRrCPoyk6kL6IAjAG2IolMK4T0hNUe0HOUs5Jw==", + "requires": { + "argparse": "^1.0.7", + "esprima": "^4.0.0" + } + }, + "jsbn": { + "version": "0.1.1", + "resolved": "https://registry.npmjs.org/jsbn/-/jsbn-0.1.1.tgz", + "integrity": "sha1-peZUwuWi3rXyAdls77yoDA7y9RM=" + }, + "json-schema": { + "version": "0.2.3", + "resolved": "https://registry.npmjs.org/json-schema/-/json-schema-0.2.3.tgz", + "integrity": "sha1-tIDIkuWaLwWVTOcnvT8qTogvnhM=" + }, + "json-schema-traverse": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", + "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==" + }, + "json-stable-stringify-without-jsonify": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/json-stable-stringify-without-jsonify/-/json-stable-stringify-without-jsonify-1.0.1.tgz", + "integrity": "sha1-nbe1lJatPzz+8wp1FC0tkwrXJlE=" + }, + "json-stringify-safe": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/json-stringify-safe/-/json-stringify-safe-5.0.1.tgz", + "integrity": "sha1-Epai1Y/UXxmg9s4B1lcB4sc1tus=" + }, + "jsprim": { + "version": "1.4.1", + "resolved": "https://registry.npmjs.org/jsprim/-/jsprim-1.4.1.tgz", + "integrity": "sha1-MT5mvB5cwG5Di8G3SZwuXFastqI=", + "requires": { + "assert-plus": "1.0.0", + "extsprintf": "1.3.0", + "json-schema": "0.2.3", + "verror": "1.10.0" + } + }, + "kuler": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/kuler/-/kuler-1.0.1.tgz", + "integrity": 
"sha512-J9nVUucG1p/skKul6DU3PUZrhs0LPulNaeUOox0IyXDi8S4CztTHs1gQphhuZmzXG7VOQSf6NJfKuzteQLv9gQ==", + "requires": { + "colornames": "^1.1.1" + } + }, + "lcid": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/lcid/-/lcid-1.0.0.tgz", + "integrity": "sha1-MIrMr6C8SDo4Z7S28rlQYlHRuDU=", + "requires": { + "invert-kv": "^1.0.0" + } + }, + "levn": { + "version": "0.3.0", + "resolved": "https://registry.npmjs.org/levn/-/levn-0.3.0.tgz", + "integrity": "sha1-OwmSTt+fCDwEkP3UwLxEIeBHZO4=", + "requires": { + "prelude-ls": "~1.1.2", + "type-check": "~0.3.2" + } + }, + "locate-path": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-5.0.0.tgz", + "integrity": "sha512-t7hw9pI+WvuwNJXwk5zVHpyhIqzg2qTlklJOf0mVxGSbe3Fp2VieZcduNYjaLDoy6p9uGpQEGWG87WpMKlNq8g==", + "requires": { + "p-locate": "^4.1.0" + } + }, + "lodash": { + "version": "4.17.15", + "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.15.tgz", + "integrity": "sha512-8xOcRHvCjnocdS5cpwXQXVzmmh5e5+saE2QGoeQmbKmRS6J3VQppPOIt0MnmE+4xlZoumy0GPG0D0MVIQbNA1A==" + }, + "lodash.camelcase": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/lodash.camelcase/-/lodash.camelcase-4.3.0.tgz", + "integrity": "sha1-soqmKIorn8ZRA1x3EfZathkDMaY=" + }, + "lodash.clone": { + "version": "4.5.0", + "resolved": "https://registry.npmjs.org/lodash.clone/-/lodash.clone-4.5.0.tgz", + "integrity": "sha1-GVhwRQ9aExkkeN9Lw9I9LeoZB7Y=" + }, + "logform": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/logform/-/logform-2.1.2.tgz", + "integrity": "sha512-+lZh4OpERDBLqjiwDLpAWNQu6KMjnlXH2ByZwCuSqVPJletw0kTWJf5CgSNAUKn1KUkv3m2cUz/LK8zyEy7wzQ==", + "requires": { + "colors": "^1.2.1", + "fast-safe-stringify": "^2.0.4", + "fecha": "^2.3.3", + "ms": "^2.1.1", + "triple-beam": "^1.3.0" + } + }, + "long": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/long/-/long-3.2.0.tgz", + "integrity": "sha1-2CG3E4yhy1gcFymQ7xTbIAtcR0s=" + }, + 
"lru-cache": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-5.1.1.tgz", + "integrity": "sha512-KpNARQA3Iwv+jTA0utUVVbrh+Jlrr1Fv0e56GGzAFOXN7dk/FviaDW8LHmK52DlcH4WP2n6gI8vN1aesBFgo9w==", + "requires": { + "yallist": "^3.0.2" + } + }, + "mime-db": { + "version": "1.40.0", + "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.40.0.tgz", + "integrity": "sha512-jYdeOMPy9vnxEqFRRo6ZvTZ8d9oPb+k18PKoYNYUe2stVEBPPwsln/qWzdbmaIvnhZ9v2P+CuecK+fpUfsV2mA==" + }, + "mime-types": { + "version": "2.1.24", + "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.24.tgz", + "integrity": "sha512-WaFHS3MCl5fapm3oLxU4eYDw77IQM2ACcxQ9RIxfaC3ooc6PFuBMGZZsYpvoXS5D5QTWPieo1jjLdAm3TBP3cQ==", + "requires": { + "mime-db": "1.40.0" + } + }, + "mimic-fn": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/mimic-fn/-/mimic-fn-2.1.0.tgz", + "integrity": "sha512-OqbOk5oEQeAZ8WXWydlu9HJjz9WVdEIvamMCcXmuqUYjTknH/sqsWvhQ3vgwKFRR1HpjvNBKQ37nbJgYzGqGcg==" + }, + "minimatch": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.0.4.tgz", + "integrity": "sha512-yJHVQEhyqPLUTgt9B83PXu6W3rx4MvvHvSUvToogpwoGDOUQ+yDrR0HRot+yOCdCO7u4hX3pWft6kWBBcqh0UA==", + "requires": { + "brace-expansion": "^1.1.7" + } + }, + "minimist": { + "version": "0.0.8", + "resolved": "https://registry.npmjs.org/minimist/-/minimist-0.0.8.tgz", + "integrity": "sha1-hX/Kv8M5fSYluCKCYuhqp6ARsF0=" + }, + "mkdirp": { + "version": "0.5.1", + "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-0.5.1.tgz", + "integrity": "sha1-MAV0OOrGz3+MR2fzhkjWaX11yQM=", + "requires": { + "minimist": "0.0.8" + } + }, + "moment": { + "version": "2.24.0", + "resolved": "https://registry.npmjs.org/moment/-/moment-2.24.0.tgz", + "integrity": "sha512-bV7f+6l2QigeBBZSM/6yTNq4P2fNpSWj/0e7jQcy87A8e7o2nAfP/34/2ky5Vw4B9S446EtIhodAzkFCcR4dQg==", + "optional": true + }, + "ms": { + "version": "2.1.2", + "resolved": 
"https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", + "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==" + }, + "mute-stream": { + "version": "0.0.8", + "resolved": "https://registry.npmjs.org/mute-stream/-/mute-stream-0.0.8.tgz", + "integrity": "sha512-nnbWWOkoWyUsTjKrhgD0dcz22mdkSnpYqbEjIm2nhwhuxlSkpywJmBo8h0ZqJdkp73mb90SssHkN4rsRaBAfAA==" + }, + "mv": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/mv/-/mv-2.1.1.tgz", + "integrity": "sha1-rmzg1vbV4KT32JN5jQPB6pVZtqI=", + "optional": true, + "requires": { + "mkdirp": "~0.5.1", + "ncp": "~2.0.0", + "rimraf": "~2.4.0" + }, + "dependencies": { + "glob": { + "version": "6.0.4", + "resolved": "https://registry.npmjs.org/glob/-/glob-6.0.4.tgz", + "integrity": "sha1-DwiGD2oVUSey+t1PnOJLGqtuTSI=", + "optional": true, + "requires": { + "inflight": "^1.0.4", + "inherits": "2", + "minimatch": "2 || 3", + "once": "^1.3.0", + "path-is-absolute": "^1.0.0" + } + }, + "rimraf": { + "version": "2.4.5", + "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-2.4.5.tgz", + "integrity": "sha1-7nEM5dk6j9uFb7Xqj/Di11k0sto=", + "optional": true, + "requires": { + "glob": "^6.0.1" + } + } + } + }, + "nan": { + "version": "2.14.0", + "resolved": "https://registry.npmjs.org/nan/-/nan-2.14.0.tgz", + "integrity": "sha512-INOFj37C7k3AfaNTtX8RhsTw7qRy7eLET14cROi9+5HAVbbHuIWUHEauBv5qT4Av2tWasiTY1Jw6puUNqRJXQg==" + }, + "natural-compare": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/natural-compare/-/natural-compare-1.4.0.tgz", + "integrity": "sha1-Sr6/7tdUHywnrPspvbvRXI1bpPc=" + }, + "ncp": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/ncp/-/ncp-2.0.0.tgz", + "integrity": "sha1-GVoh1sRuNh0vsSgbo4uR6d9727M=", + "optional": true + }, + "nice-try": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/nice-try/-/nice-try-1.0.5.tgz", + "integrity": 
"sha512-1nh45deeb5olNY7eX82BkPO7SSxR5SSYJiPTrTdFUVYwAl8CKMA5N9PjTYkHiRjisVcxcQ1HXdLhx2qxxJzLNQ==" + }, + "number-is-nan": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/number-is-nan/-/number-is-nan-1.0.1.tgz", + "integrity": "sha1-CXtgK1NCKlIsGvuHkDGDNpQaAR0=" + }, + "oauth-sign": { + "version": "0.9.0", + "resolved": "https://registry.npmjs.org/oauth-sign/-/oauth-sign-0.9.0.tgz", + "integrity": "sha512-fexhUFFPTGV8ybAtSIGbV6gOkSv8UtRbDBnAyLQw4QPKkgNlsH2ByPGtMUqdWkos6YCRmAqViwgZrJc/mRDzZQ==" + }, + "once": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz", + "integrity": "sha1-WDsap3WWHUsROsF9nFC6753Xa9E=", + "requires": { + "wrappy": "1" + } + }, + "one-time": { + "version": "0.0.4", + "resolved": "https://registry.npmjs.org/one-time/-/one-time-0.0.4.tgz", + "integrity": "sha1-+M33eISCb+Tf+T46nMN7HkSAdC4=" + }, + "onetime": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/onetime/-/onetime-5.1.0.tgz", + "integrity": "sha512-5NcSkPHhwTVFIQN+TUqXoS5+dlElHXdpAWu9I0HP20YOtIi+aZ0Ct82jdlILDxjLEAWwvm+qj1m6aEtsDVmm6Q==", + "requires": { + "mimic-fn": "^2.1.0" + } + }, + "optionator": { + "version": "0.8.3", + "resolved": "https://registry.npmjs.org/optionator/-/optionator-0.8.3.tgz", + "integrity": "sha512-+IW9pACdk3XWmmTXG8m3upGUJst5XRGzxMRjXzAuJ1XnIFNvfhjjIuYkDvysnPQ7qzqVzLt78BCruntqRhWQbA==", + "requires": { + "deep-is": "~0.1.3", + "fast-levenshtein": "~2.0.6", + "levn": "~0.3.0", + "prelude-ls": "~1.1.2", + "type-check": "~0.3.2", + "word-wrap": "~1.2.3" + } + }, + "optjs": { + "version": "3.2.2", + "resolved": "https://registry.npmjs.org/optjs/-/optjs-3.2.2.tgz", + "integrity": "sha1-aabOicRCpEQDFBrS+bNwvVu29O4=" + }, + "os-locale": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/os-locale/-/os-locale-1.4.0.tgz", + "integrity": "sha1-IPnxeuKe00XoveWDsT0gCYA8FNk=", + "requires": { + "lcid": "^1.0.0" + } + }, + "os-tmpdir": { + "version": "1.0.2", + "resolved": 
"https://registry.npmjs.org/os-tmpdir/-/os-tmpdir-1.0.2.tgz", + "integrity": "sha1-u+Z0BseaqFxc/sdm/lc0VV36EnQ=" + }, + "p-limit": { + "version": "2.2.1", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-2.2.1.tgz", + "integrity": "sha512-85Tk+90UCVWvbDavCLKPOLC9vvY8OwEX/RtKF+/1OADJMVlFfEHOiMTPVyxg7mk/dKa+ipdHm0OUkTvCpMTuwg==", + "requires": { + "p-try": "^2.0.0" + } + }, + "p-locate": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-4.1.0.tgz", + "integrity": "sha512-R79ZZ/0wAxKGu3oYMlz8jy/kbhsNrS7SKZ7PxEHBgJ5+F2mtFW2fK2cOtBh1cHYkQsbzFV7I+EoRKe6Yt0oK7A==", + "requires": { + "p-limit": "^2.2.0" + } + }, + "p-try": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/p-try/-/p-try-2.2.0.tgz", + "integrity": "sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ==" + }, + "parent-module": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/parent-module/-/parent-module-1.0.1.tgz", + "integrity": "sha512-GQ2EWRpQV8/o+Aw8YqtfZZPfNRWZYkbidE9k5rpl/hC3vtHHBfGm2Ifi6qWV+coDGkrUKZAxE3Lot5kcsRlh+g==", + "requires": { + "callsites": "^3.0.0" + } + }, + "path-exists": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-4.0.0.tgz", + "integrity": "sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==" + }, + "path-is-absolute": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz", + "integrity": "sha1-F0uSaHNVNP+8es5r9TpanhtcX18=" + }, + "path-key": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/path-key/-/path-key-2.0.1.tgz", + "integrity": "sha1-QRyttXTFoUDTpLGRDUDYDMn0C0A=" + }, + "performance-now": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/performance-now/-/performance-now-2.1.0.tgz", + "integrity": "sha1-Ywn04OX6kT7BxpMHrjZLSzd8nns=" + }, + "prelude-ls": { + "version": "1.1.2", + 
"resolved": "https://registry.npmjs.org/prelude-ls/-/prelude-ls-1.1.2.tgz", + "integrity": "sha1-IZMqVJ9eUv/ZqCf1cOBL5iqX2lQ=" + }, + "process-nextick-args": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/process-nextick-args/-/process-nextick-args-2.0.1.tgz", + "integrity": "sha512-3ouUOpQhtgrbOa17J7+uxOTpITYWaGP7/AhoR3+A+/1e9skrzelGi/dXzEYyvbxubEF6Wn2ypscTKiKJFFn1ag==" + }, + "progress": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/progress/-/progress-2.0.3.tgz", + "integrity": "sha512-7PiHtLll5LdnKIMw100I+8xJXR5gW2QwWYkT6iJva0bXitZKa/XMrSbdmg3r2Xnaidz9Qumd0VPaMrZlF9V9sA==" + }, + "protobufjs": { + "version": "5.0.3", + "resolved": "https://registry.npmjs.org/protobufjs/-/protobufjs-5.0.3.tgz", + "integrity": "sha512-55Kcx1MhPZX0zTbVosMQEO5R6/rikNXd9b6RQK4KSPcrSIIwoXTtebIczUrXlwaSrbz4x8XUVThGPob1n8I4QA==", + "requires": { + "ascli": "~1", + "bytebuffer": "~5", + "glob": "^7.0.5", + "yargs": "^3.10.0" + }, + "dependencies": { + "ansi-regex": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-2.1.1.tgz", + "integrity": "sha1-w7M6te42DYbg5ijwRorn7yfWVN8=" + }, + "camelcase": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-2.1.1.tgz", + "integrity": "sha1-fB0W1nmhu+WcoCys7PsBHiAfWh8=" + }, + "cliui": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/cliui/-/cliui-3.2.0.tgz", + "integrity": "sha1-EgYBU3qRbSmUD5NNo7SNWFo5IT0=", + "requires": { + "string-width": "^1.0.1", + "strip-ansi": "^3.0.1", + "wrap-ansi": "^2.0.0" + } + }, + "is-fullwidth-code-point": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-1.0.0.tgz", + "integrity": "sha1-754xOG8DGn8NZDr4L95QxFfvAMs=", + "requires": { + "number-is-nan": "^1.0.0" + } + }, + "string-width": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-1.0.2.tgz", + "integrity": 
"sha1-EYvfW4zcUaKn5w0hHgfisLmxB9M=", + "requires": { + "code-point-at": "^1.0.0", + "is-fullwidth-code-point": "^1.0.0", + "strip-ansi": "^3.0.0" + } + }, + "strip-ansi": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-3.0.1.tgz", + "integrity": "sha1-ajhfuIU9lS1f8F0Oiq+UJ43GPc8=", + "requires": { + "ansi-regex": "^2.0.0" + } + }, + "wrap-ansi": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-2.1.0.tgz", + "integrity": "sha1-2Pw9KE3QV5T+hJc8rs3Rz4JP3YU=", + "requires": { + "string-width": "^1.0.1", + "strip-ansi": "^3.0.1" + } + }, + "y18n": { + "version": "3.2.1", + "resolved": "https://registry.npmjs.org/y18n/-/y18n-3.2.1.tgz", + "integrity": "sha1-bRX7qITAhnnA136I53WegR4H+kE=" + }, + "yargs": { + "version": "3.32.0", + "resolved": "https://registry.npmjs.org/yargs/-/yargs-3.32.0.tgz", + "integrity": "sha1-AwiOnr+edWtpdRYR0qXvWRSCyZU=", + "requires": { + "camelcase": "^2.0.1", + "cliui": "^3.0.3", + "decamelize": "^1.1.1", + "os-locale": "^1.4.0", + "string-width": "^1.0.1", + "window-size": "^0.1.4", + "y18n": "^3.2.0" + } + } + } + }, + "psl": { + "version": "1.1.33", + "resolved": "https://registry.npmjs.org/psl/-/psl-1.1.33.tgz", + "integrity": "sha512-LTDP2uSrsc7XCb5lO7A8BI1qYxRe/8EqlRvMeEl6rsnYAqDOl8xHR+8lSAIVfrNaSAlTPTNOCgNjWcoUL3AZsw==" + }, + "punycode": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/punycode/-/punycode-2.1.1.tgz", + "integrity": "sha512-XRsRjdf+j5ml+y/6GKHPZbrF/8p2Yga0JPtdqTIY2Xe5ohJPD9saDJJLPvp9+NSBprVvevdXZybnj2cv8OEd0A==" + }, + "qs": { + "version": "6.5.2", + "resolved": "https://registry.npmjs.org/qs/-/qs-6.5.2.tgz", + "integrity": "sha512-N5ZAX4/LxJmF+7wN74pUD6qAh9/wnvdQcjq9TZjevvXzSUo7bfmw91saqMjzGS2xq91/odN2dW/WOl7qQHNDGA==" + }, + "readable-stream": { + "version": "2.3.6", + "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.6.tgz", + "integrity": 
"sha512-tQtKA9WIAhBF3+VLAseyMqZeBjW0AHJoxOtYqSUZNJxauErmLbVm2FW1y+J/YA9dUrAC39ITejlZWhVIwawkKw==", + "requires": { + "core-util-is": "~1.0.0", + "inherits": "~2.0.3", + "isarray": "~1.0.0", + "process-nextick-args": "~2.0.0", + "safe-buffer": "~5.1.1", + "string_decoder": "~1.1.1", + "util-deprecate": "~1.0.1" + } + }, + "regexpp": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/regexpp/-/regexpp-2.0.1.tgz", + "integrity": "sha512-lv0M6+TkDVniA3aD1Eg0DVpfU/booSu7Eev3TDO/mZKHBfVjgCGTV4t4buppESEYDtkArYFOxTJWv6S5C+iaNw==" + }, + "request": { + "version": "2.88.0", + "resolved": "https://registry.npmjs.org/request/-/request-2.88.0.tgz", + "integrity": "sha512-NAqBSrijGLZdM0WZNsInLJpkJokL72XYjUpnB0iwsRgxh7dB6COrHnTBNwN0E+lHDAJzu7kLAkDeY08z2/A0hg==", + "requires": { + "aws-sign2": "~0.7.0", + "aws4": "^1.8.0", + "caseless": "~0.12.0", + "combined-stream": "~1.0.6", + "extend": "~3.0.2", + "forever-agent": "~0.6.1", + "form-data": "~2.3.2", + "har-validator": "~5.1.0", + "http-signature": "~1.2.0", + "is-typedarray": "~1.0.0", + "isstream": "~0.1.2", + "json-stringify-safe": "~5.0.1", + "mime-types": "~2.1.19", + "oauth-sign": "~0.9.0", + "performance-now": "^2.1.0", + "qs": "~6.5.2", + "safe-buffer": "^5.1.2", + "tough-cookie": "~2.4.3", + "tunnel-agent": "^0.6.0", + "uuid": "^3.3.2" + } + }, + "require-directory": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/require-directory/-/require-directory-2.1.1.tgz", + "integrity": "sha1-jGStX9MNqxyXbiNE/+f3kqam30I=" + }, + "require-main-filename": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/require-main-filename/-/require-main-filename-2.0.0.tgz", + "integrity": "sha512-NKN5kMDylKuldxYLSUfrbo5Tuzh4hd+2E8NPPX02mZtn1VuREQToYe/ZdlJy+J3uCpfaiGF05e7B8W0iXbQHmg==" + }, + "resolve-from": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-4.0.0.tgz", + "integrity": 
"sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g==" + }, + "restore-cursor": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/restore-cursor/-/restore-cursor-3.1.0.tgz", + "integrity": "sha512-l+sSefzHpj5qimhFSE5a8nufZYAM3sBSVMAPtYkmC+4EH2anSGaEMXSD0izRQbu9nfyQ9y5JrVmp7E8oZrUjvA==", + "requires": { + "onetime": "^5.1.0", + "signal-exit": "^3.0.2" + } + }, + "rimraf": { + "version": "2.6.3", + "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-2.6.3.tgz", + "integrity": "sha512-mwqeW5XsA2qAejG46gYdENaxXjx9onRNCfn7L0duuP4hCuTIi/QO7PDK07KJfp1d+izWPrzEJDcSqBa0OZQriA==", + "requires": { + "glob": "^7.1.3" + } + }, + "run-async": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/run-async/-/run-async-2.3.0.tgz", + "integrity": "sha1-A3GrSuC91yDUFm19/aZP96RFpsA=", + "requires": { + "is-promise": "^2.1.0" + } + }, + "rxjs": { + "version": "6.5.3", + "resolved": "https://registry.npmjs.org/rxjs/-/rxjs-6.5.3.tgz", + "integrity": "sha512-wuYsAYYFdWTAnAaPoKGNhfpWwKZbJW+HgAJ+mImp+Epl7BG8oNWBCTyRM8gba9k4lk8BgWdoYm21Mo/RYhhbgA==", + "requires": { + "tslib": "^1.9.0" + } + }, + "safe-buffer": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz", + "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==" + }, + "safe-json-stringify": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/safe-json-stringify/-/safe-json-stringify-1.2.0.tgz", + "integrity": "sha512-gH8eh2nZudPQO6TytOvbxnuhYBOvDBBLW52tz5q6X58lJcd/tkmqFR+5Z9adS8aJtURSXWThWy/xJtJwixErvg==", + "optional": true + }, + "safer-buffer": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz", + "integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==" + }, + "semver": { + "version": "6.3.0", + "resolved": 
"https://registry.npmjs.org/semver/-/semver-6.3.0.tgz", + "integrity": "sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw==" + }, + "set-blocking": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/set-blocking/-/set-blocking-2.0.0.tgz", + "integrity": "sha1-BF+XgtARrppoA93TgrJDkrPYkPc=" + }, + "shebang-command": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-1.2.0.tgz", + "integrity": "sha1-RKrGW2lbAzmJaMOfNj/uXer98eo=", + "requires": { + "shebang-regex": "^1.0.0" + } + }, + "shebang-regex": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-1.0.0.tgz", + "integrity": "sha1-2kL0l0DAtC2yypcoVxyxkMmO/qM=" + }, + "signal-exit": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-3.0.2.tgz", + "integrity": "sha1-tf3AjxKH6hF4Yo5BXiUTK3NkbG0=" + }, + "simple-swizzle": { + "version": "0.2.2", + "resolved": "https://registry.npmjs.org/simple-swizzle/-/simple-swizzle-0.2.2.tgz", + "integrity": "sha1-pNprY1/8zMoz9w0Xy5JZLeleVXo=", + "requires": { + "is-arrayish": "^0.3.1" + } + }, + "slice-ansi": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/slice-ansi/-/slice-ansi-2.1.0.tgz", + "integrity": "sha512-Qu+VC3EwYLldKa1fCxuuvULvSJOKEgk9pi8dZeCVK7TqBfUNTH4sFkk4joj8afVSfAYgJoSOetjx9QWOJ5mYoQ==", + "requires": { + "ansi-styles": "^3.2.0", + "astral-regex": "^1.0.0", + "is-fullwidth-code-point": "^2.0.0" + }, + "dependencies": { + "is-fullwidth-code-point": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-2.0.0.tgz", + "integrity": "sha1-o7MKXE8ZkYMWeqq5O+764937ZU8=" + } + } + }, + "sprintf-js": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/sprintf-js/-/sprintf-js-1.0.3.tgz", + "integrity": "sha1-BOaSb2YolTVPPdAVIDYzuFcpfiw=" + }, + "ssh2": { + "version": "0.8.6", + "resolved": 
"https://registry.npmjs.org/ssh2/-/ssh2-0.8.6.tgz", + "integrity": "sha512-T0cPmEtmtC8WxSupicFDjx3vVUdNXO8xu2a/D5bjt8ixOUCe387AgvxU3mJgEHpu7+Sq1ZYx4d3P2pl/yxMH+w==", + "requires": { + "ssh2-streams": "~0.4.7" + } + }, + "ssh2-streams": { + "version": "0.4.7", + "resolved": "https://registry.npmjs.org/ssh2-streams/-/ssh2-streams-0.4.7.tgz", + "integrity": "sha512-JhF8BNfeguOqVHOLhXjzLlRKlUP8roAEhiT/y+NcBQCqpRUupLNrRf2M+549OPNVGx21KgKktug4P3MY/IvTig==", + "requires": { + "asn1": "~0.2.0", + "bcrypt-pbkdf": "^1.0.2", + "streamsearch": "~0.1.2" + } + }, + "sshpk": { + "version": "1.16.1", + "resolved": "https://registry.npmjs.org/sshpk/-/sshpk-1.16.1.tgz", + "integrity": "sha512-HXXqVUq7+pcKeLqqZj6mHFUMvXtOJt1uoUx09pFW6011inTMxqI8BA8PM95myrIyyKwdnzjdFjLiE6KBPVtJIg==", + "requires": { + "asn1": "~0.2.3", + "assert-plus": "^1.0.0", + "bcrypt-pbkdf": "^1.0.0", + "dashdash": "^1.12.0", + "ecc-jsbn": "~0.1.1", + "getpass": "^0.1.1", + "jsbn": "~0.1.0", + "safer-buffer": "^2.0.2", + "tweetnacl": "~0.14.0" + } + }, + "stack-trace": { + "version": "0.0.10", + "resolved": "https://registry.npmjs.org/stack-trace/-/stack-trace-0.0.10.tgz", + "integrity": "sha1-VHxws0fo0ytOEI6hoqFZ5f3eGcA=" + }, + "streamsearch": { + "version": "0.1.2", + "resolved": "https://registry.npmjs.org/streamsearch/-/streamsearch-0.1.2.tgz", + "integrity": "sha1-gIudDlb8Jz2Am6VzOOkpkZoanxo=" + }, + "string-width": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.0.tgz", + "integrity": "sha512-zUz5JD+tgqtuDjMhwIg5uFVV3dtqZ9yQJlZVfq4I01/K5Paj5UHj7VyrQOJvzawSVlKpObApbfD0Ed6yJc+1eg==", + "requires": { + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.0" + }, + "dependencies": { + "emoji-regex": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", + "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==" + } + } + }, + 
"string_decoder": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.1.1.tgz", + "integrity": "sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==", + "requires": { + "safe-buffer": "~5.1.0" + } + }, + "strip-ansi": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.0.tgz", + "integrity": "sha512-AuvKTrTfQNYNIctbR1K/YGTR1756GycPsg7b9bdV9Duqur4gv6aKqHXah67Z8ImS7WEz5QVcOtlfW2rZEugt6w==", + "requires": { + "ansi-regex": "^5.0.0" + } + }, + "strip-json-comments": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-3.0.1.tgz", + "integrity": "sha512-VTyMAUfdm047mwKl+u79WIdrZxtFtn+nBxHeb844XBQ9uMNTuTHdx2hc5RiAJYqwTj3wc/xe5HLSdJSkJ+WfZw==" + }, + "supports-color": { + "version": "5.5.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.5.0.tgz", + "integrity": "sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow==", + "requires": { + "has-flag": "^3.0.0" + } + }, + "table": { + "version": "5.4.6", + "resolved": "https://registry.npmjs.org/table/-/table-5.4.6.tgz", + "integrity": "sha512-wmEc8m4fjnob4gt5riFRtTu/6+4rSe12TpAELNSqHMfF3IqnA+CH37USM6/YR3qRZv7e56kAEAtd6nKZaxe0Ug==", + "requires": { + "ajv": "^6.10.2", + "lodash": "^4.17.14", + "slice-ansi": "^2.1.0", + "string-width": "^3.0.0" + }, + "dependencies": { + "ajv": { + "version": "6.10.2", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.10.2.tgz", + "integrity": "sha512-TXtUUEYHuaTEbLZWIKUr5pmBuhDLy+8KYtPYdcV8qC+pOZL+NKqYwvWSRrVXHn+ZmRRAu8vJTAznH7Oag6RVRw==", + "requires": { + "fast-deep-equal": "^2.0.1", + "fast-json-stable-stringify": "^2.0.0", + "json-schema-traverse": "^0.4.1", + "uri-js": "^4.2.2" + } + }, + "ansi-regex": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-4.1.0.tgz", + "integrity": 
"sha512-1apePfXM1UOSqw0o9IiFAovVz9M5S1Dg+4TrDwfMewQ6p/rmMueb7tWZjQ1rx4Loy1ArBggoqGpfqqdI4rondg==" + }, + "is-fullwidth-code-point": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-2.0.0.tgz", + "integrity": "sha1-o7MKXE8ZkYMWeqq5O+764937ZU8=" + }, + "string-width": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-3.1.0.tgz", + "integrity": "sha512-vafcv6KjVZKSgz06oM/H6GDBrAtz8vdhQakGjFIvNrHA6y3HCF1CInLy+QLq8dTJPQ1b+KDUqDFctkdRW44e1w==", + "requires": { + "emoji-regex": "^7.0.1", + "is-fullwidth-code-point": "^2.0.0", + "strip-ansi": "^5.1.0" + } + }, + "strip-ansi": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-5.2.0.tgz", + "integrity": "sha512-DuRs1gKbBqsMKIZlrffwlug8MHkcnpjs5VPmL1PAh+mA30U0DTotfDZ0d2UUsXpPmPmMMJ6W773MaA3J+lbiWA==", + "requires": { + "ansi-regex": "^4.1.0" + } + } + } + }, + "text-hex": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/text-hex/-/text-hex-1.0.0.tgz", + "integrity": "sha512-uuVGNWzgJ4yhRaNSiubPY7OjISw4sw4E5Uv0wbjp+OzcbmVU/rsT8ujgcXJhn9ypzsgr5vlzpPqP+MBBKcGvbg==" + }, + "text-table": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/text-table/-/text-table-0.2.0.tgz", + "integrity": "sha1-f17oI66AUgfACvLfSoTsP8+lcLQ=" + }, + "through": { + "version": "2.3.8", + "resolved": "https://registry.npmjs.org/through/-/through-2.3.8.tgz", + "integrity": "sha1-DdTJ/6q8NXlgsbckEV1+Doai4fU=" + }, + "tmp": { + "version": "0.0.33", + "resolved": "https://registry.npmjs.org/tmp/-/tmp-0.0.33.tgz", + "integrity": "sha512-jRCJlojKnZ3addtTOjdIqoRuPEKBvNXcGYqzO6zWZX8KfKEpnGY5jfggJQ3EjKuu8D4bJRr0y+cYJFmYbImXGw==", + "requires": { + "os-tmpdir": "~1.0.2" + } + }, + "tough-cookie": { + "version": "2.4.3", + "resolved": "https://registry.npmjs.org/tough-cookie/-/tough-cookie-2.4.3.tgz", + "integrity": 
"sha512-Q5srk/4vDM54WJsJio3XNn6K2sCG+CQ8G5Wz6bZhRZoAe/+TxjWB/GlFAnYEbkYVlON9FMk/fE3h2RLpPXo4lQ==", + "requires": { + "psl": "^1.1.24", + "punycode": "^1.4.1" + }, + "dependencies": { + "punycode": { + "version": "1.4.1", + "resolved": "https://registry.npmjs.org/punycode/-/punycode-1.4.1.tgz", + "integrity": "sha1-wNWmOycYgArY4esPpSachN1BhF4=" + } + } + }, + "triple-beam": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/triple-beam/-/triple-beam-1.3.0.tgz", + "integrity": "sha512-XrHUvV5HpdLmIj4uVMxHggLbFSZYIn7HEWsqePZcI50pco+MPqJ50wMGY794X7AOOhxOBAjbkqfAbEe/QMp2Lw==" + }, + "tslib": { + "version": "1.10.0", + "resolved": "https://registry.npmjs.org/tslib/-/tslib-1.10.0.tgz", + "integrity": "sha512-qOebF53frne81cf0S9B41ByenJ3/IuH8yJKngAX35CmiZySA0khhkovshKK+jGCaMnVomla7gVlIcc3EvKPbTQ==" + }, + "tunnel-agent": { + "version": "0.6.0", + "resolved": "https://registry.npmjs.org/tunnel-agent/-/tunnel-agent-0.6.0.tgz", + "integrity": "sha1-J6XeoGs2sEoKmWZ3SykIaPD8QP0=", + "requires": { + "safe-buffer": "^5.0.1" + } + }, + "tweetnacl": { + "version": "0.14.5", + "resolved": "https://registry.npmjs.org/tweetnacl/-/tweetnacl-0.14.5.tgz", + "integrity": "sha1-WuaBd/GS1EViadEIr6k/+HQ/T2Q=" + }, + "type-check": { + "version": "0.3.2", + "resolved": "https://registry.npmjs.org/type-check/-/type-check-0.3.2.tgz", + "integrity": "sha1-WITKtRLPHTVeP7eE8wgEsrUg23I=", + "requires": { + "prelude-ls": "~1.1.2" + } + }, + "type-fest": { + "version": "0.8.1", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.8.1.tgz", + "integrity": "sha512-4dbzIzqvjtgiM5rw1k5rEHtBANKmdudhGyBEajN01fEyhaAIhsoKNy6y7+IN93IfpFtwY9iqi7kD+xwKhQsNJA==" + }, + "uri-js": { + "version": "4.2.2", + "resolved": "https://registry.npmjs.org/uri-js/-/uri-js-4.2.2.tgz", + "integrity": "sha512-KY9Frmirql91X2Qgjry0Wd4Y+YTdrdZheS8TFwvkbLWf/G5KNJDCh6pKL5OZctEW4+0Baa5idK2ZQuELRwPznQ==", + "requires": { + "punycode": "^2.1.0" + } + }, + "util-deprecate": { + "version": "1.0.2", + "resolved": 
"https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz", + "integrity": "sha1-RQ1Nyfpw3nMnYvvS1KKJgUGaDM8=" + }, + "uuid": { + "version": "3.3.3", + "resolved": "https://registry.npmjs.org/uuid/-/uuid-3.3.3.tgz", + "integrity": "sha512-pW0No1RGHgzlpHJO1nsVrHKpOEIxkGg1xB+v0ZmdNH5OAeAwzAVrCnI2/6Mtx+Uys6iaylxa+D3g4j63IKKjSQ==" + }, + "v8-compile-cache": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/v8-compile-cache/-/v8-compile-cache-2.1.0.tgz", + "integrity": "sha512-usZBT3PW+LOjM25wbqIlZwPeJV+3OSz3M1k1Ws8snlW39dZyYL9lOGC5FgPVHfk0jKmjiDV8Z0mIbVQPiwFs7g==" + }, + "verror": { + "version": "1.10.0", + "resolved": "https://registry.npmjs.org/verror/-/verror-1.10.0.tgz", + "integrity": "sha1-OhBcoXBTr1XW4nDB+CiGguGNpAA=", + "requires": { + "assert-plus": "^1.0.0", + "core-util-is": "1.0.2", + "extsprintf": "^1.2.0" + } + }, + "which": { + "version": "1.3.1", + "resolved": "https://registry.npmjs.org/which/-/which-1.3.1.tgz", + "integrity": "sha512-HxJdYWq1MTIQbJ3nw0cqssHoTNU267KlrDuGZ1WYlxDStUtKUhOaJmh112/TZmHxxUfuJqPXSOm7tDyas0OSIQ==", + "requires": { + "isexe": "^2.0.0" + } + }, + "which-module": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/which-module/-/which-module-2.0.0.tgz", + "integrity": "sha1-2e8H3Od7mQK4o6j6SzHD4/fm6Ho=" + }, + "window-size": { + "version": "0.1.4", + "resolved": "https://registry.npmjs.org/window-size/-/window-size-0.1.4.tgz", + "integrity": "sha1-+OGqHuWlPsW/FR/6CXQqatdpeHY=" + }, + "winston": { + "version": "3.2.1", + "resolved": "https://registry.npmjs.org/winston/-/winston-3.2.1.tgz", + "integrity": "sha512-zU6vgnS9dAWCEKg/QYigd6cgMVVNwyTzKs81XZtTFuRwJOcDdBg7AU0mXVyNbs7O5RH2zdv+BdNZUlx7mXPuOw==", + "requires": { + "async": "^2.6.1", + "diagnostics": "^1.1.1", + "is-stream": "^1.1.0", + "logform": "^2.1.1", + "one-time": "0.0.4", + "readable-stream": "^3.1.1", + "stack-trace": "0.0.x", + "triple-beam": "^1.3.0", + "winston-transport": "^4.3.0" + }, + "dependencies": { + 
"readable-stream": { + "version": "3.4.0", + "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-3.4.0.tgz", + "integrity": "sha512-jItXPLmrSR8jmTRmRWJXCnGJsfy85mB3Wd/uINMXA65yrnFo0cPClFIUWzo2najVNSl+mx7/4W8ttlLWJe99pQ==", + "requires": { + "inherits": "^2.0.3", + "string_decoder": "^1.1.1", + "util-deprecate": "^1.0.1" + } + } + } + }, + "winston-transport": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/winston-transport/-/winston-transport-4.3.0.tgz", + "integrity": "sha512-B2wPuwUi3vhzn/51Uukcao4dIduEiPOcOt9HJ3QeaXgkJ5Z7UwpBzxS4ZGNHtrxrUvTwemsQiSys0ihOf8Mp1A==", + "requires": { + "readable-stream": "^2.3.6", + "triple-beam": "^1.2.0" + } + }, + "word-wrap": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/word-wrap/-/word-wrap-1.2.3.tgz", + "integrity": "sha512-Hz/mrNwitNRh/HUAtM/VT/5VH+ygD6DV7mYKZAtHOrbs8U7lvPS6xf7EJKMF0uW1KJCl0H701g3ZGus+muE5vQ==" + }, + "wrap-ansi": { + "version": "6.2.0", + "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-6.2.0.tgz", + "integrity": "sha512-r6lPcBGxZXlIcymEu7InxDMhdW0KDxpLgoFLcguasxCaJ/SOIZwINatK9KY/tf+ZrlywOKU0UDj3ATXUBfxJXA==", + "requires": { + "ansi-styles": "^4.0.0", + "string-width": "^4.1.0", + "strip-ansi": "^6.0.0" + }, + "dependencies": { + "ansi-styles": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.2.0.tgz", + "integrity": "sha512-7kFQgnEaMdRtwf6uSfUnVr9gSGC7faurn+J/Mv90/W+iTtN0405/nLdopfMWwchyxhbGYl6TC4Sccn9TUkGAgg==", + "requires": { + "@types/color-name": "^1.1.1", + "color-convert": "^2.0.1" + } + }, + "color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "requires": { + "color-name": "~1.1.4" + } + }, + "color-name": { + "version": "1.1.4", + "resolved": 
"https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==" + } + } + }, + "wrappy": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", + "integrity": "sha1-tSQ9jz7BqjXxNkYFvA0QNuMKtp8=" + }, + "write": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/write/-/write-1.0.3.tgz", + "integrity": "sha512-/lg70HAjtkUgWPVZhZcm+T4hkL8Zbtp1nFNOn3lRrxnlv50SRBv7cR7RqR+GMsd3hUXy9hWBo4CHTbFTcOYwig==", + "requires": { + "mkdirp": "^0.5.1" + } + }, + "y18n": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/y18n/-/y18n-4.0.0.tgz", + "integrity": "sha512-r9S/ZyXu/Xu9q1tYlpsLIsa3EeLXXk0VwlxqTcFRfg9EhMW+17kbt9G0NrgCmhGb5vT2hyhJZLfDGx+7+5Uj/w==" + }, + "yallist": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/yallist/-/yallist-3.0.3.tgz", + "integrity": "sha512-S+Zk8DEWE6oKpV+vI3qWkaK+jSbIK86pCwe2IF/xwIpQ8jEuxpw9NyaGjmp9+BoJv5FV2piqCDcoCtStppiq2A==" + }, + "yargs": { + "version": "15.0.2", + "resolved": "https://registry.npmjs.org/yargs/-/yargs-15.0.2.tgz", + "integrity": "sha512-GH/X/hYt+x5hOat4LMnCqMd8r5Cv78heOMIJn1hr7QPPBqfeC6p89Y78+WB9yGDvfpCvgasfmWLzNzEioOUD9Q==", + "requires": { + "cliui": "^6.0.0", + "decamelize": "^1.2.0", + "find-up": "^4.1.0", + "get-caller-file": "^2.0.1", + "require-directory": "^2.1.1", + "require-main-filename": "^2.0.0", + "set-blocking": "^2.0.0", + "string-width": "^4.2.0", + "which-module": "^2.0.0", + "y18n": "^4.0.0", + "yargs-parser": "^16.1.0" + } + }, + "yargs-parser": { + "version": "16.1.0", + "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-16.1.0.tgz", + "integrity": "sha512-H/V41UNZQPkUMIT5h5hiwg4QKIY1RPvoBV4XcjUbRM8Bk2oKqqyZ0DIEbTFZB0XjbtSPG8SAa/0DxCQmiRgzKg==", + "requires": { + "camelcase": "^5.0.0", + "decamelize": "^1.2.0" + } + } + } +} diff --git a/package.json b/package.json new file mode 100644 index 
0000000..88ac6dd --- /dev/null +++ b/package.json @@ -0,0 +1,34 @@ +{ + "name": "democratic-csi", + "version": "0.1.0", + "description": "kubernetes csi driver framework", + "main": "bin/democratic-csi", + "scripts": { + "test": "echo \"Error: no test specified\" && exit 1", + "start": "./bin/democratic-csi" + }, + "author": "Travis Glenn Hansen ", + "bugs": { + "url": "https://github.com/democratic-csi/democratic-csi/issues" + }, + "homepage": "https://github.com/democratic-csi/democratic-csi#readme", + "license": "MIT", + "repository": { + "type": "git", + "url": "https://github.com/democratic-csi/democratic-csi.git" + }, + "dependencies": { + "@grpc/proto-loader": "^0.5.3", + "bunyan": "^1.8.12", + "eslint": "^6.6.0", + "grpc": "^1.24.2", + "js-yaml": "^3.13.1", + "lru-cache": "^5.1.1", + "request": "^2.88.0", + "ssh2": "^0.8.6", + "uri-js": "^4.2.2", + "uuid": "^3.3.3", + "winston": "^3.2.1", + "yargs": "^15.0.2" + } +} diff --git a/src/driver/controller-zfs-ssh/index.js b/src/driver/controller-zfs-ssh/index.js new file mode 100644 index 0000000..2e054cd --- /dev/null +++ b/src/driver/controller-zfs-ssh/index.js @@ -0,0 +1,1674 @@ +const grpc = require("grpc"); +const { CsiBaseDriver } = require("../index"); +const SshClient = require("../../utils/ssh").SshClient; +const { GrpcError } = require("../../utils/grpc"); + +const { Zetabyte, ZfsSshProcessManager } = require("../../utils/zfs"); + +// zfs common properties +const MANAGED_PROPERTY_NAME = "democratic-csi:managed_resource"; +const SUCCESS_PROPERTY_NAME = "democratic-csi:provision_success"; +const VOLUME_SOURCE_CLONE_SNAPSHOT_PREFIX = "volume-source-for-volume-"; +const VOLUME_SOURCE_DETACHED_SNAPSHOT_PREFIX = "volume-source-for-snapshot-"; +const VOLUME_CSI_NAME_PROPERTY_NAME = "democratic-csi:csi_volume_name"; +const SHARE_VOLUME_CONTEXT_PROPERTY_NAME = + "democratic-csi:csi_share_volume_context"; +const VOLUME_CONTENT_SOURCE_TYPE_PROPERTY_NAME = + "democratic-csi:csi_volume_content_source_type"; +const 
VOLUME_CONTENT_SOURCE_ID_PROPERTY_NAME = + "democratic-csi:csi_volume_content_source_id"; +const SNAPSHOT_CSI_NAME_PROPERTY_NAME = "democratic-csi:csi_snapshot_name"; +const SNAPSHOT_CSI_SOURCE_VOLUME_ID_PROPERTY_NAME = + "democratic-csi:csi_snapshot_source_volume_id"; + +/** + * Base driver to provisin zfs assets over ssh. + * Derived drivers only need to implement: + * - getDriverZfsResourceType() // return "filesystem" or "volume" + * - async createShare(call, datasetName) // return appropriate volume_context for Node operations + * - async deleteShare(call, datasetName) // no return expected + * - async expandVolume(call, datasetName) // no return expected, used for restarting services etc if needed + */ +class ControllerZfsSshBaseDriver extends CsiBaseDriver { + constructor(ctx, options) { + super(...arguments); + + options.service.identity.capabilities = + options.service.identity.capabilities || {}; + + options.service.controller.capabilities = + options.service.controller.capabilities || {}; + + options.service.node.capabilities = options.service.node.capabilities || {}; + + if (!("service" in options.service.identity.capabilities)) { + this.ctx.logger.debug("setting default identity service caps"); + + options.service.identity.capabilities.service = [ + //"UNKNOWN", + "CONTROLLER_SERVICE" + //"VOLUME_ACCESSIBILITY_CONSTRAINTS" + ]; + } + + if (!("volume_expansion" in options.service.identity.capabilities)) { + this.ctx.logger.debug("setting default identity volume_expansion caps"); + + options.service.identity.capabilities.volume_expansion = [ + //"UNKNOWN", + "ONLINE" + //"OFFLINE" + ]; + } + + if (!("rpc" in options.service.controller.capabilities)) { + this.ctx.logger.debug("setting default controller caps"); + + options.service.controller.capabilities.rpc = [ + //"UNKNOWN", + "CREATE_DELETE_VOLUME", + //"PUBLISH_UNPUBLISH_VOLUME", + "LIST_VOLUMES", + "GET_CAPACITY", + "CREATE_DELETE_SNAPSHOT", + "LIST_SNAPSHOTS", + "CLONE_VOLUME", + 
//"PUBLISH_READONLY", + "EXPAND_VOLUME" + ]; + } + + if (!("rpc" in options.service.node.capabilities)) { + this.ctx.logger.debug("setting default node caps"); + + switch (this.getDriverZfsResourceType()) { + case "filesystem": + options.service.node.capabilities.rpc = [ + //"UNKNOWN", + "STAGE_UNSTAGE_VOLUME", + "GET_VOLUME_STATS" + //"EXPAND_VOLUME" + ]; + break; + case "volume": + options.service.node.capabilities.rpc = [ + //"UNKNOWN", + "STAGE_UNSTAGE_VOLUME", + "GET_VOLUME_STATS", + "EXPAND_VOLUME" + ]; + break; + } + } + } + + getSshClient() { + return new SshClient({ + logger: this.ctx.logger, + connection: this.options.sshConnection + }); + } + + getZetabyte() { + const sshClient = this.getSshClient(); + return new Zetabyte({ + executor: new ZfsSshProcessManager(sshClient), + idempotent: true + }); + } + + getDatasetParentName() { + let datasetParentName = this.options.zfs.datasetParentName; + datasetParentName = datasetParentName.replace(/\/$/, ""); + return datasetParentName; + } + + getVolumeParentDatasetName() { + let datasetParentName = this.getDatasetParentName(); + //datasetParentName += "/v"; + datasetParentName = datasetParentName.replace(/\/$/, ""); + return datasetParentName; + } + + getDetachedSnapshotParentDatasetName() { + //let datasetParentName = this.getDatasetParentName(); + let datasetParentName = this.options.zfs.detachedSnapshotsDatasetParentName; + //datasetParentName += "/s"; + datasetParentName = datasetParentName.replace(/\/$/, ""); + return datasetParentName; + } + + async removeSnapshotsFromDatatset(datasetName, options = {}) { + const zb = this.getZetabyte(); + + await zb.zfs.destroy(datasetName + "@%", options); + } + + assertCapabilities(capabilities) { + const driverZfsResourceType = this.getDriverZfsResourceType(); + this.ctx.logger.verbose("validating capabilities: %j", capabilities); + + let message = null; + 
//[{"access_mode":{"mode":"SINGLE_NODE_WRITER"},"mount":{"mount_flags":["noatime","_netdev"],"fs_type":"nfs"},"access_type":"mount"}] + const valid = capabilities.every(capability => { + switch (driverZfsResourceType) { + case "filesystem": + if (capability.access_type != "mount") { + message = `invalid access_type ${capability.access_type}`; + return false; + } + + if ( + capability.mount.fs_type && + !["nfs"].includes(capability.mount.fs_type) + ) { + message = `invalid fs_type ${capability.mount.fs_type}`; + return false; + } + + if ( + ![ + "UNKNOWN", + "SINGLE_NODE_WRITER", + "SINGLE_NODE_READER_ONLY", + "MULTI_NODE_READER_ONLY", + "MULTI_NODE_SINGLE_WRITER", + "MULTI_NODE_MULTI_WRITER" + ].includes(capability.access_mode.mode) + ) { + message = `invalid access_mode, ${capability.access_mode.mode}`; + return false; + } + + return true; + case "volume": + if (capability.access_type == "mount") { + if ( + capability.mount.fs_type && + !["ext3", "ext4", "ext4dev", "xfs"].includes( + capability.mount.fs_type + ) + ) { + message = `invalid fs_type ${capability.mount.fs_type}`; + return false; + } + } + + if ( + ![ + "UNKNOWN", + "SINGLE_NODE_WRITER", + "SINGLE_NODE_READER_ONLY", + "MULTI_NODE_READER_ONLY", + "MULTI_NODE_SINGLE_WRITER" + ].includes(capability.access_mode.mode) + ) { + message = `invalid access_mode, ${capability.access_mode.mode}`; + return false; + } + + return true; + } + }); + + return { valid, message }; + } + + /** + * Create a volume doing in essence the following: + * 1. create dataset + * 2. create nfs share + * + * Should return 2 parameters + * 1. `server` - host/ip of the nfs server + * 2. 
`share` - path of the mount shared + * + * @param {*} call + */ + async CreateVolume(call) { + const driver = this; + const driverZfsResourceType = this.getDriverZfsResourceType(); + const sshClient = this.getSshClient(); + const zb = this.getZetabyte(); + + let datasetParentName = this.getVolumeParentDatasetName(); + let snapshotParentDatasetName = this.getDetachedSnapshotParentDatasetName(); + let zvolBlocksize = this.options.zfs.zvolBlocksize || "16K"; + let name = call.request.name; + let volume_content_source = call.request.volume_content_source; + + if (!datasetParentName) { + throw new GrpcError( + grpc.status.FAILED_PRECONDITION, + `invalid configuration: missing datasetParentName` + ); + } + + if (!name) { + throw new GrpcError( + grpc.status.INVALID_ARGUMENT, + `volume name is required` + ); + } + + if (call.request.volume_capabilities) { + const result = this.assertCapabilities(call.request.volume_capabilities); + if (result.valid !== true) { + throw new GrpcError(grpc.status.INVALID_ARGUMENT, result.message); + } + } + + if ( + call.request.capacity_range.required_bytes > 0 && + call.request.capacity_range.limit_bytes > 0 && + call.request.capacity_range.required_bytes > + call.request.capacity_range.limit_bytes + ) { + throw new GrpcError( + grpc.status.OUT_OF_RANGE, + `required_bytes is greather than limit_bytes` + ); + } + + const datasetName = datasetParentName + "/" + name; + let capacity_bytes = + call.request.capacity_range.required_bytes || + call.request.capacity_range.limit_bytes; + + if (capacity_bytes && driverZfsResourceType == "volume") { + //make sure to align capacity_bytes with zvol blocksize + //volume size must be a multiple of volume block size + capacity_bytes = zb.helpers.generateZvolSize( + capacity_bytes, + zvolBlocksize + ); + } + if (!capacity_bytes) { + //should never happen, value must be set + throw new GrpcError( + grpc.status.INVALID_ARGUMENT, + `volume capacity is required (either required_bytes or limit_bytes)` + ); + } 
+ + // ensure *actual* capacity is not greater than limit + if ( + call.request.capacity_range.limit_bytes && + call.request.capacity_range.limit_bytes > 0 && + capacity_bytes > call.request.capacity_range.limit_bytes + ) { + throw new GrpcError( + grpc.status.OUT_OF_RANGE, + `required volume capacity is greater than limit` + ); + } + + /** + * This is specifically a FreeBSD limitation, not sure what linux limit is + * https://www.ixsystems.com/documentation/freenas/11.2-U5/storage.html#zfs-zvol-config-opts-tab + * https://www.ixsystems.com/documentation/freenas/11.3-BETA1/intro.html#path-and-name-lengths + * https://www.freebsd.org/cgi/man.cgi?query=devfs + */ + if (driverZfsResourceType == "volume") { + let extentDiskName = "zvol/" + datasetName; + if (extentDiskName.length > 63) { + throw new GrpcError( + grpc.status.FAILED_PRECONDITION, + `extent disk name cannot exceed 63 characters: ${extentDiskName}` + ); + } + } + + let response, command; + let volume_content_source_snapshot_id; + let volume_content_source_volume_id; + let fullSnapshotName; + let volumeProperties = {}; + volumeProperties[VOLUME_CSI_NAME_PROPERTY_NAME] = name; + volumeProperties[MANAGED_PROPERTY_NAME] = "true"; + + // TODO: also set access_mode as property? + // TODO: also set fsType as property? 
+ // TODO: allow for users to configure arbitrary/custom properties to add + + // zvol enables reservation by default + // this implements 'sparse' zvols + if (driverZfsResourceType == "volume") { + if (!this.options.zfs.zvolEnableReservation) { + volumeProperties.refreservation = 0; + } + } + + let detachedClone = false; + + // create dataset + if (volume_content_source) { + volumeProperties[VOLUME_CONTENT_SOURCE_TYPE_PROPERTY_NAME] = + volume_content_source.type; + switch (volume_content_source.type) { + // must be available when adverstising CREATE_DELETE_SNAPSHOT + // simply clone + case "snapshot": + try { + let tmpDetachedClone = JSON.parse( + call.request.parameters.detachedVolumesFromSnapshots + ); + if (typeof tmpDetachedClone === "boolean") { + detachedClone = tmpDetachedClone; + } + } catch (e) {} + + volumeProperties[VOLUME_CONTENT_SOURCE_ID_PROPERTY_NAME] = + volume_content_source.snapshot.snapshot_id; + volume_content_source_snapshot_id = + volume_content_source.snapshot.snapshot_id; + + // zfs origin property contains parent info, ie: pool0/k8s/test/PVC-111@clone-test + if (zb.helpers.isZfsSnapshot(volume_content_source_snapshot_id)) { + fullSnapshotName = + datasetParentName + "/" + volume_content_source_snapshot_id; + } else { + fullSnapshotName = + snapshotParentDatasetName + + "/" + + volume_content_source_snapshot_id + + "@" + + VOLUME_SOURCE_CLONE_SNAPSHOT_PREFIX + + name; + } + + driver.ctx.logger.debug("full snapshot name: %s", fullSnapshotName); + + if (!zb.helpers.isZfsSnapshot(volume_content_source_snapshot_id)) { + try { + await zb.zfs.snapshot(fullSnapshotName); + } catch (err) { + if (err.toString().includes("dataset does not exist")) { + throw new GrpcError( + grpc.status.FAILED_PRECONDITION, + `snapshot source_snapshot_id ${volume_content_source_snapshot_id} does not exist` + ); + } + + throw err; + } + } + + if (detachedClone) { + try { + response = await zb.zfs.send_receive( + fullSnapshotName, + [], + datasetName, + [] + ); + + 
response = await zb.zfs.set(datasetName, volumeProperties); + } catch (err) { + if ( + err.toString().includes("destination") && + err.toString().includes("exists") + ) { + // move along + } else { + throw err; + } + } + + // remove snapshots from target + await this.removeSnapshotsFromDatatset(datasetName, { + force: true + }); + } else { + try { + response = await zb.zfs.clone(fullSnapshotName, datasetName, { + properties: volumeProperties + }); + } catch (err) { + if (err.toString().includes("dataset does not exist")) { + throw new GrpcError( + grpc.status.FAILED_PRECONDITION, + "dataset does not exists" + ); + } + + throw err; + } + } + + if (!zb.helpers.isZfsSnapshot(volume_content_source_snapshot_id)) { + try { + // schedule snapshot removal from source + await zb.zfs.destroy(fullSnapshotName, { + recurse: true, + force: true, + defer: true + }); + } catch (err) { + if (err.toString().includes("dataset does not exist")) { + throw new GrpcError( + grpc.status.FAILED_PRECONDITION, + `snapshot source_snapshot_id ${volume_content_source_snapshot_id} does not exist` + ); + } + + throw err; + } + } + + break; + // must be available when adverstising CLONE_VOLUME + // create snapshot first, then clone + case "volume": + try { + let tmpDetachedClone = JSON.parse( + call.request.parameters.detachedVolumesFromVolumes + ); + if (typeof tmpDetachedClone === "boolean") { + detachedClone = tmpDetachedClone; + } + } catch (e) {} + + volumeProperties[VOLUME_CONTENT_SOURCE_ID_PROPERTY_NAME] = + volume_content_source.volume.volume_id; + volume_content_source_volume_id = + volume_content_source.volume.volume_id; + + fullSnapshotName = + datasetParentName + + "/" + + volume_content_source_volume_id + + "@" + + VOLUME_SOURCE_CLONE_SNAPSHOT_PREFIX + + name; + + driver.ctx.logger.debug("full snapshot name: %s", fullSnapshotName); + + // create snapshot + try { + response = await zb.zfs.snapshot(fullSnapshotName); + } catch (err) { + if (err.toString().includes("dataset does not 
exist")) { + throw new GrpcError( + grpc.status.FAILED_PRECONDITION, + "dataset does not exists" + ); + } + + throw err; + } + + if (detachedClone) { + try { + response = await zb.zfs.send_receive( + fullSnapshotName, + [], + datasetName, + [] + ); + } catch (err) { + if ( + err.toString().includes("destination") && + err.toString().includes("exists") + ) { + // move along + } else { + throw err; + } + } + + response = await zb.zfs.set(datasetName, volumeProperties); + + // remove snapshots from target + await this.removeSnapshotsFromDatatset(datasetName, { + force: true + }); + + // remove snapshot from source + await zb.zfs.destroy(fullSnapshotName, { + recurse: true, + force: true, + defer: true + }); + } else { + // create clone + // zfs origin property contains parent info, ie: pool0/k8s/test/PVC-111@clone-test + try { + response = await zb.zfs.clone(fullSnapshotName, datasetName, { + properties: volumeProperties + }); + } catch (err) { + if (err.toString().includes("dataset does not exist")) { + throw new GrpcError( + grpc.status.FAILED_PRECONDITION, + "dataset does not exists" + ); + } + + throw err; + } + } + break; + default: + throw new GrpcError( + grpc.status.INVALID_ARGUMENT, + `invalid volume_content_source type: ${volume_content_source.type}` + ); + break; + } + } else { + // force blocksize on newly created zvols + if (driverZfsResourceType == "volume") { + volumeProperties.volblocksize = zvolBlocksize; + } + + await zb.zfs.create(datasetName, { + parents: true, + properties: volumeProperties, + size: driverZfsResourceType == "volume" ? 
capacity_bytes : false + }); + } + + let setProps = false; + let properties = {}; + let volume_context = {}; + + switch (driverZfsResourceType) { + case "filesystem": + // set quota + if (this.options.zfs.datasetEnableQuotas) { + setProps = true; + properties.refquota = capacity_bytes; + } + + // set reserve + if (this.options.zfs.datasetEnableReservation) { + setProps = true; + properties.refreservation = capacity_bytes; + } + + // quota for dataset and all children + // reserved for dataset and all children + + // dedup + // ro? + // record size + + // set properties + if (setProps) { + await zb.zfs.set(datasetName, properties); + } + + //datasetPermissionsMode: 0777, + //datasetPermissionsUser: "root", + //datasetPermissionsGroup: "wheel", + + // get properties needed for remaining calls + properties = await zb.zfs.get(datasetName, [ + "mountpoint", + "refquota", + "compression", + VOLUME_CSI_NAME_PROPERTY_NAME, + VOLUME_CONTENT_SOURCE_TYPE_PROPERTY_NAME, + VOLUME_CONTENT_SOURCE_ID_PROPERTY_NAME + ]); + properties = properties[datasetName]; + driver.ctx.logger.debug("zfs props data: %j", properties); + + // set mode + if (this.options.zfs.datasetPermissionsMode) { + command = sshClient.buildCommand("chmod", [ + this.options.zfs.datasetPermissionsMode, + properties.mountpoint.value + ]); + driver.ctx.logger.verbose("set permission command: %s", command); + response = await sshClient.exec(command); + } + + // set ownership + if ( + this.options.zfs.datasetPermissionsUser || + this.options.zfs.datasetPermissionsGroup + ) { + command = sshClient.buildCommand("chown", [ + (this.options.zfs.datasetPermissionsUser + ? this.options.zfs.datasetPermissionsUser + : "") + + ":" + + (this.options.zfs.datasetPermissionsGroup + ? 
this.options.zfs.datasetPermissionsGroup + : ""), + properties.mountpoint.value + ]); + driver.ctx.logger.verbose("set ownership command: %s", command); + response = await sshClient.exec(command); + } + + break; + case "volume": + // TODO: create all the necessary iscsi stuff + // set properties + // set reserve + setProps = true; + + // this should be already set, but when coming from a volume source + // it may not match that of the source + // TODO: probably need to recalculate size based on *actual* volume source blocksize in case of difference from currently configured + properties.volsize = capacity_bytes; + + //dedup + //compression + + if (setProps) { + await zb.zfs.set(datasetName, properties); + } + + break; + } + + volume_context = await this.createShare(call, datasetName); + await zb.zfs.set(datasetName, { + [SHARE_VOLUME_CONTEXT_PROPERTY_NAME]: + "'" + JSON.stringify(volume_context) + "'" + }); + + // set this just before sending out response so we know if volume completed + // this should give us a relatively sane way to clean up artifacts over time + await zb.zfs.set(datasetName, { [SUCCESS_PROPERTY_NAME]: "true" }); + + const res = { + volume: { + volume_id: name, + capacity_bytes: this.options.zfs.datasetEnableQuotas + ? capacity_bytes + : 0, + content_source: volume_content_source, + volume_context + } + }; + + return res; + } + + /** + * Delete a volume + * + * Deleting a volume consists of the following steps: + * 1. delete the nfs share + * 2. 
delete the dataset + * + * @param {*} call + */ + async DeleteVolume(call) { + const driver = this; + const zb = this.getZetabyte(); + + let datasetParentName = this.getVolumeParentDatasetName(); + let name = call.request.volume_id; + + if (!datasetParentName) { + throw new GrpcError( + grpc.status.FAILED_PRECONDITION, + `invalid configuration: missing datasetParentName` + ); + } + + if (!name) { + throw new GrpcError( + grpc.status.INVALID_ARGUMENT, + `volume_id is required` + ); + } + + const datasetName = datasetParentName + "/" + name; + let properties; + + // get properties needed for remaining calls + try { + properties = await zb.zfs.get(datasetName, [ + "mountpoint", + "origin", + "refquota", + "compression", + VOLUME_CSI_NAME_PROPERTY_NAME + ]); + properties = properties[datasetName]; + } catch (err) { + let ignore = false; + if (err.toString().includes("dataset does not exist")) { + ignore = true; + } + + if (!ignore) { + throw err; + } + } + + driver.ctx.logger.debug("dataset properties: %j", properties); + + // remove share resources + await this.deleteShare(call, datasetName); + + // remove parent snapshot if appropriate with defer + if ( + properties && + properties.origin && + properties.origin.value != "-" && + zb.helpers + .extractSnapshotName(properties.origin.value) + .startsWith(VOLUME_SOURCE_CLONE_SNAPSHOT_PREFIX) + ) { + driver.ctx.logger.debug( + "removing with defer source snapshot: %s", + properties.origin.value + ); + + try { + await zb.zfs.destroy(properties.origin.value, { + recurse: true, + force: true, + defer: true + }); + } catch (err) { + if (err.toString().includes("snapshot has dependent clones")) { + throw new GrpcError( + grpc.status.FAILED_PRECONDITION, + "snapshot has dependent clones" + ); + } + throw err; + } + } + + // NOTE: -f does NOT allow deletes if dependent filesets exist + // NOTE: -R will recursively delete items + dependent filesets + // delete dataset + try { + await zb.zfs.destroy(datasetName, { recurse: true, 
force: true }); + } catch (err) { + if (err.toString().includes("filesystem has dependent clones")) { + throw new GrpcError( + grpc.status.FAILED_PRECONDITION, + "filesystem has dependent clones" + ); + } + + throw err; + } + + return {}; + } + + /** + * + * @param {*} call + */ + async ControllerExpandVolume(call) { + const driver = this; + const driverZfsResourceType = this.getDriverZfsResourceType(); + const zb = this.getZetabyte(); + + let datasetParentName = this.getVolumeParentDatasetName(); + let name = call.request.volume_id; + + if (!datasetParentName) { + throw new GrpcError( + grpc.status.FAILED_PRECONDITION, + `invalid configuration: missing datasetParentName` + ); + } + + if (!name) { + throw new GrpcError( + grpc.status.INVALID_ARGUMENT, + `volume_id is required` + ); + } + + const datasetName = datasetParentName + "/" + name; + + let capacity_bytes = + call.request.capacity_range.required_bytes || + call.request.capacity_range.limit_bytes; + if (!capacity_bytes) { + //should never happen, value must be set + throw new GrpcError( + grpc.status.INVALID_ARGUMENT, + `volume capacity is required (either required_bytes or limit_bytes)` + ); + } + + if (capacity_bytes && driverZfsResourceType == "volume") { + //make sure to align capacity_bytes with zvol blocksize + //volume size must be a multiple of volume block size + let properties = await zb.zfs.get(datasetName, ["volblocksize"]); + properties = properties[datasetName]; + capacity_bytes = zb.helpers.generateZvolSize( + capacity_bytes, + properties.volblocksize.value + ); + } + + if ( + call.request.capacity_range.required_bytes > 0 && + call.request.capacity_range.limit_bytes > 0 && + call.request.capacity_range.required_bytes > + call.request.capacity_range.limit_bytes + ) { + throw new GrpcError( + grpc.status.INVALID_ARGUMENT, + `required_bytes is greather than limit_bytes` + ); + } + + // ensure *actual* capacity is not greater than limit + if ( + call.request.capacity_range.limit_bytes && + 
call.request.capacity_range.limit_bytes > 0 && + capacity_bytes > call.request.capacity_range.limit_bytes + ) { + throw new GrpcError( + grpc.status.OUT_OF_RANGE, + `required volume capacity is greater than limit` + ); + } + + let setProps = false; + let properties = {}; + + switch (driverZfsResourceType) { + case "filesystem": + // set quota + if (this.options.zfs.datasetEnableQuotas) { + setProps = true; + properties.refquota = capacity_bytes; + } + + // set reserve + if (this.options.zfs.datasetEnableReservation) { + setProps = true; + properties.refreservation = capacity_bytes; + } + break; + case "volume": + properties.volsize = capacity_bytes; + setProps = true; + + if (this.options.zfs.zvolEnableReservation) { + properties.refreservation = capacity_bytes; + } + break; + } + + if (setProps) { + await zb.zfs.set(datasetName, properties); + } + + await this.expandVolume(call, datasetName); + + return { + capacity_bytes: this.options.zfs.datasetEnableQuotas ? capacity_bytes : 0, + node_expansion_required: driverZfsResourceType == "volume" ? true : false + }; + } + + /** + * TODO: consider volume_capabilities? 
+ * + * @param {*} call + */ + async GetCapacity(call) { + const driver = this; + const zb = this.getZetabyte(); + + let datasetParentName = this.getVolumeParentDatasetName(); + + if (!datasetParentName) { + throw new GrpcError( + grpc.status.FAILED_PRECONDITION, + `invalid configuration: missing datasetParentName` + ); + } + + if (call.request.volume_capabilities) { + const result = this.assertCapabilities(call.request.volume_capabilities); + + if (result.valid !== true) { + return { available_capacity: 0 }; + } + } + + const datasetName = datasetParentName; + + let properties; + properties = await zb.zfs.get(datasetName, ["avail"]); + properties = properties[datasetName]; + + return { available_capacity: properties.available.value }; + } + + /** + * + * TODO: check capability to ensure not asking about block volumes + * + * @param {*} call + */ + async ListVolumes(call) { + const driver = this; + const driverZfsResourceType = this.getDriverZfsResourceType(); + const zb = this.getZetabyte(); + + let datasetParentName = this.getVolumeParentDatasetName(); + let entries = []; + let entries_length = 0; + let next_token; + let uuid, page, next_page; + let response; + + const max_entries = call.request.max_entries; + const starting_token = call.request.starting_token; + + // get data from cache and return immediately + if (starting_token) { + let parts = starting_token.split(":"); + uuid = parts[0]; + page = parseInt(parts[1]); + entries = this.ctx.cache.get(`ListVolumes:result:${uuid}`); + if (entries) { + entries = JSON.parse(JSON.stringify(entries)); + entries_length = entries.length; + entries = entries.splice((page - 1) * max_entries, max_entries); + if (page * max_entries < entries_length) { + next_page = page + 1; + next_token = `${uuid}:${next_page}`; + } else { + next_token = null; + } + const data = { + entries: entries, + next_token: next_token + }; + + return data; + } else { + // TODO: throw error / cache expired + } + } + + if (!datasetParentName) { + 
throw new GrpcError( + grpc.status.FAILED_PRECONDITION, + `invalid configuration: missing datasetParentName` + ); + } + + const datasetName = datasetParentName; + + let types = []; + switch (driverZfsResourceType) { + case "filesystem": + types = ["filesystem"]; + break; + case "volume": + types = ["volume"]; + break; + } + try { + response = await zb.zfs.list( + datasetName, + [ + "name", + "mountpoint", + "refquota", + "avail", + "used", + VOLUME_CSI_NAME_PROPERTY_NAME, + VOLUME_CONTENT_SOURCE_TYPE_PROPERTY_NAME, + VOLUME_CONTENT_SOURCE_ID_PROPERTY_NAME, + "volsize", + MANAGED_PROPERTY_NAME, + SHARE_VOLUME_CONTEXT_PROPERTY_NAME, + SUCCESS_PROPERTY_NAME + ], + { types, recurse: true } + ); + } catch (err) { + if (err.toString().includes("dataset does not exist")) { + return { + entries: [], + next_token: null + }; + } + + throw err; + } + + driver.ctx.logger.debug("list volumes result: %j", response); + + // remove parent dataset from results + if (driverZfsResourceType == "filesystem") { + response.data.shift(); + } + + entries = []; + response.indexed.forEach(row => { + // ignore rows were csi_name is empty + if (row[MANAGED_PROPERTY_NAME] != "true") { + return; + } + + let volume_content_source; + if ( + zb.helpers.isPropertyValueSet( + row[VOLUME_CONTENT_SOURCE_TYPE_PROPERTY_NAME] + ) + ) { + volume_content_source = {}; + switch (row[VOLUME_CONTENT_SOURCE_TYPE_PROPERTY_NAME]) { + case "snapshot": + volume_content_source.snapshot = {}; + volume_content_source.snapshot.snapshot_id = + row[VOLUME_CONTENT_SOURCE_ID_PROPERTY_NAME]; + break; + case "volume": + volume_content_source.volume = {}; + volume_content_source.volume.volume_id = + row[VOLUME_CONTENT_SOURCE_ID_PROPERTY_NAME]; + break; + } + } + + entries.push({ + volume: { + // remove parent dataset info + volume_id: row["name"].replace( + new RegExp("^" + datasetName + "/"), + "" + ), + capacity_bytes: + driverZfsResourceType == "filesystem" + ? 
row["refquota"] + : row["volsize"], + content_source: volume_content_source, + volume_context: JSON.parse(row[SHARE_VOLUME_CONTEXT_PROPERTY_NAME]) + } + }); + }); + + if (max_entries && entries.length > max_entries) { + uuid = uuidv4(); + this.ctx.cache.set( + `ListVolumes:result:${uuid}`, + JSON.parse(JSON.stringify(entries)) + ); + next_token = `${uuid}:2`; + entries = entries.splice(0, max_entries); + } + + const data = { + entries: entries, + next_token: next_token + }; + + return data; + } + + /** + * + * @param {*} call + */ + async ListSnapshots(call) { + const driver = this; + const driverZfsResourceType = this.getDriverZfsResourceType(); + const zb = this.getZetabyte(); + + let entries = []; + let entries_length = 0; + let next_token; + let uuid, page, next_page; + + const max_entries = call.request.max_entries; + const starting_token = call.request.starting_token; + + let types = []; + + const volumeParentDatasetName = this.getVolumeParentDatasetName(); + const snapshotParentDatasetName = this.getDetachedSnapshotParentDatasetName(); + + // get data from cache and return immediately + if (starting_token) { + let parts = starting_token.split(":"); + uuid = parts[0]; + page = parseInt(parts[1]); + entries = this.ctx.cache.get(`ListSnapshots:result:${uuid}`); + if (entries) { + entries = JSON.parse(JSON.stringify(entries)); + entries_length = entries.length; + entries = entries.splice((page - 1) * max_entries, max_entries); + if (page * max_entries < entries_length) { + next_page = page + 1; + next_token = `${uuid}:${next_page}`; + } else { + next_token = null; + } + const data = { + entries: entries, + next_token: next_token + }; + + return data; + } else { + // TODO: throw error / cache expired + } + } + + if (!volumeParentDatasetName) { + // throw error + throw new GrpcError( + grpc.status.FAILED_PRECONDITION, + `invalid configuration: missing datasetParentName` + ); + } + + let snapshot_id = call.request.snapshot_id; + let source_volume_id = 
call.request.source_volume_id; + + entries = []; + for (let loopType of ["snapshot", "filesystem"]) { + let response, operativeFilesystem, operativeFilesystemType; + let datasetParentName; + switch (loopType) { + case "snapshot": + datasetParentName = volumeParentDatasetName; + types = ["snapshot"]; + // should only send 1 of snapshot_id or source_volume_id, preferring the former if sent + if (snapshot_id) { + if (!zb.helpers.isZfsSnapshot(snapshot_id)) { + return; + } + operativeFilesystem = volumeParentDatasetName + "/" + snapshot_id; + operativeFilesystemType = 3; + } else if (source_volume_id) { + operativeFilesystem = + volumeParentDatasetName + "/" + source_volume_id; + operativeFilesystemType = 2; + } else { + operativeFilesystem = volumeParentDatasetName; + operativeFilesystemType = 1; + } + break; + case "filesystem": + datasetParentName = snapshotParentDatasetName; + if (!datasetParentName) { + continue; + } + if (driverZfsResourceType == "filesystem") { + types = ["filesystem"]; + } else { + types = ["volume"]; + } + + // should only send 1 of snapshot_id or source_volume_id, preferring the former if sent + if (snapshot_id) { + if (zb.helpers.isZfsSnapshot(snapshot_id)) { + continue; + } + operativeFilesystem = snapshotParentDatasetName + "/" + snapshot_id; + operativeFilesystemType = 3; + } else if (source_volume_id) { + operativeFilesystem = + snapshotParentDatasetName + "/" + source_volume_id; + operativeFilesystemType = 2; + } else { + operativeFilesystem = snapshotParentDatasetName; + operativeFilesystemType = 1; + } + break; + } + + try { + response = await zb.zfs.list( + operativeFilesystem, + [ + "name", + "creation", + "mountpoint", + "refquota", + "avail", + "used", + VOLUME_CSI_NAME_PROPERTY_NAME, + SNAPSHOT_CSI_NAME_PROPERTY_NAME, + MANAGED_PROPERTY_NAME + ], + { types, recurse: true } + ); + } catch (err) { + let message; + if (err.toString().includes("dataset does not exist")) { + switch (operativeFilesystemType) { + case 1: + //message = 
`invalid configuration: datasetParentName ${datasetParentName} does not exist`; + continue; + break; + case 2: + message = `source_volume_id ${source_volume_id} does not exist`; + break; + case 3: + message = `snapshot_id ${snapshot_id} does not exist`; + break; + } + throw new GrpcError(grpc.status.NOT_FOUND, message); + } + throw new GrpcError(grpc.status.FAILED_PRECONDITION, e.toString()); + } + + response.indexed.forEach(row => { + // skip any snapshots not explicitly created by CO + if (row[MANAGED_PROPERTY_NAME] != "true") { + return; + } + + // ignore snapshots that are not explicit CO snapshots + if ( + !zb.helpers.isPropertyValueSet(row[SNAPSHOT_CSI_NAME_PROPERTY_NAME]) + ) { + return; + } + + // strip parent dataset + let source_volume_id = row["name"].replace( + new RegExp("^" + datasetParentName + "/"), + "" + ); + + // strip snapshot details (@snapshot-name) + if (source_volume_id.includes("@")) { + source_volume_id = source_volume_id.substring( + 0, + source_volume_id.indexOf("@") + ); + } else { + source_volume_id = source_volume_id.replace( + new RegExp("/" + row[SNAPSHOT_CSI_NAME_PROPERTY_NAME] + "$"), + "" + ); + } + + if (source_volume_id == datasetParentName) { + return; + } + + if (source_volume_id) + entries.push({ + snapshot: { + /** + * The purpose of this field is to give CO guidance on how much space + * is needed to create a volume from this snapshot. + * + * In that vein, I think it's best to return 0 here given the + * unknowns of 'cow' implications. 
+ */ + size_bytes: 0, + + // remove parent dataset details + snapshot_id: row["name"].replace( + new RegExp("^" + datasetParentName + "/"), + "" + ), + source_volume_id: source_volume_id, + //https://github.com/protocolbuffers/protobuf/blob/master/src/google/protobuf/timestamp.proto + creation_time: { + seconds: row["creation"], + nanos: 0 + }, + ready_to_use: true + } + }); + }); + } + + if (max_entries && entries.length > max_entries) { + uuid = uuidv4(); + this.ctx.cache.set( + `ListSnapshots:result:${uuid}`, + JSON.parse(JSON.stringify(entries)) + ); + next_token = `${uuid}:2`; + entries = entries.splice(0, max_entries); + } + + const data = { + entries: entries, + next_token: next_token + }; + + return data; + } + + /** + * + * @param {*} call + */ + async CreateSnapshot(call) { + const driver = this; + const driverZfsResourceType = this.getDriverZfsResourceType(); + const zb = this.getZetabyte(); + + let detachedSnapshot = false; + try { + let tmpDetachedSnapshot = JSON.parse( + call.request.parameters.detachedSnapshots + ); // snapshot class parameter + if (typeof tmpDetachedSnapshot === "boolean") { + detachedSnapshot = tmpDetachedSnapshot; + } + } catch (e) {} + + let response; + const volumeParentDatasetName = this.getVolumeParentDatasetName(); + let datasetParentName; + let snapshotProperties = {}; + let types = []; + + if (detachedSnapshot) { + datasetParentName = this.getDetachedSnapshotParentDatasetName(); + if (driverZfsResourceType == "filesystem") { + types.push("filesystem"); + } else { + types.push("volume"); + } + } else { + datasetParentName = this.getVolumeParentDatasetName(); + types.push("snapshot"); + } + + if (!datasetParentName) { + throw new GrpcError( + grpc.status.FAILED_PRECONDITION, + `invalid configuration: missing datasetParentName` + ); + } + + // both these are required + let source_volume_id = call.request.source_volume_id; + let name = call.request.name; + + if (!source_volume_id) { + throw new GrpcError( + 
grpc.status.INVALID_ARGUMENT, + `snapshot source_volume_id is required` + ); + } + + if (!name) { + throw new GrpcError( + grpc.status.INVALID_ARGUMENT, + `snapshot name is required` + ); + } + + const datasetName = datasetParentName + "/" + source_volume_id; + snapshotProperties[SNAPSHOT_CSI_NAME_PROPERTY_NAME] = name; + snapshotProperties[ + SNAPSHOT_CSI_SOURCE_VOLUME_ID_PROPERTY_NAME + ] = source_volume_id; + snapshotProperties[MANAGED_PROPERTY_NAME] = "true"; + + driver.ctx.logger.verbose("requested snapshot name: %s", name); + + let invalid_chars; + invalid_chars = name.match(/[^a-z0-9_\-:.+]+/gi); + if (invalid_chars) { + invalid_chars = String.prototype.concat( + ...new Set(invalid_chars.join("")) + ); + throw new GrpcError( + grpc.status.INVALID_ARGUMENT, + `snapshot name contains invalid characters: ${invalid_chars}` + ); + } + + // https://stackoverflow.com/questions/32106243/regex-to-remove-all-non-alpha-numeric-and-replace-spaces-with/32106277 + name = name.replace(/[^a-z0-9_\-:.+]+/gi, ""); + + driver.ctx.logger.verbose("cleansed snapshot name: %s", name); + + let fullSnapshotName; + let snapshotDatasetName; + let tmpSnapshotName; + if (detachedSnapshot) { + fullSnapshotName = datasetName + "/" + name; + } else { + fullSnapshotName = datasetName + "@" + name; + } + + driver.ctx.logger.verbose("full snapshot name: %s", fullSnapshotName); + + if (detachedSnapshot) { + tmpSnapshotName = + volumeParentDatasetName + + "/" + + source_volume_id + + "@" + + VOLUME_SOURCE_DETACHED_SNAPSHOT_PREFIX + + name; + snapshotDatasetName = datasetName + "/" + name; + + await zb.zfs.create(datasetName, { parents: true }); + + try { + await zb.zfs.snapshot(tmpSnapshotName); + } catch (err) { + if (err.toString().includes("dataset does not exist")) { + throw new GrpcError( + grpc.status.FAILED_PRECONDITION, + `snapshot source_volume_id ${source_volume_id} does not exist` + ); + } + + throw err; + } + + try { + response = await zb.zfs.send_receive( + tmpSnapshotName, + [], + 
snapshotDatasetName, + [] + ); + + response = await zb.zfs.set(snapshotDatasetName, snapshotProperties); + } catch (err) { + if ( + err.toString().includes("destination") && + err.toString().includes("exists") + ) { + // move along + } else { + throw err; + } + } + + // remove snapshot from target + await zb.zfs.destroy( + snapshotDatasetName + + "@" + + zb.helpers.extractSnapshotName(tmpSnapshotName), + { + recurse: true, + force: true, + defer: true + } + ); + + // remove snapshot from source + await zb.zfs.destroy(tmpSnapshotName, { + recurse: true, + force: true, + defer: true + }); + } else { + try { + await zb.zfs.snapshot(fullSnapshotName, { + properties: snapshotProperties + }); + } catch (err) { + if (err.toString().includes("dataset does not exist")) { + throw new GrpcError( + grpc.status.FAILED_PRECONDITION, + `snapshot source_volume_id ${source_volume_id} does not exist` + ); + } + + throw err; + } + } + + let properties; + properties = await zb.zfs.get( + fullSnapshotName, + [ + "name", + "creation", + "mountpoint", + "refquota", + "avail", + "used", + VOLUME_CSI_NAME_PROPERTY_NAME, + SNAPSHOT_CSI_NAME_PROPERTY_NAME, + SNAPSHOT_CSI_SOURCE_VOLUME_ID_PROPERTY_NAME, + MANAGED_PROPERTY_NAME + ], + { types } + ); + properties = properties[fullSnapshotName]; + driver.ctx.logger.verbose("snapshot properties: %j", properties); + + // set this just before sending out response so we know if volume completed + // this should give us a relatively sane way to clean up artifacts over time + await zb.zfs.set(fullSnapshotName, { [SUCCESS_PROPERTY_NAME]: "true" }); + + return { + snapshot: { + /** + * The purpose of this field is to give CO guidance on how much space + * is needed to create a volume from this snapshot. + * + * In that vein, I think it's best to return 0 here given the + * unknowns of 'cow' implications. 
+ */ + size_bytes: 0, + + // remove parent dataset details + snapshot_id: properties.name.value.replace( + new RegExp("^" + datasetParentName + "/"), + "" + ), + source_volume_id: source_volume_id, + //https://github.com/protocolbuffers/protobuf/blob/master/src/google/protobuf/timestamp.proto + creation_time: { + seconds: properties.creation.value, + nanos: 0 + }, + ready_to_use: true + } + }; + } + + /** + * In addition, if clones have been created from a snapshot, then they must + * be destroyed before the snapshot can be destroyed. + * + * @param {*} call + */ + async DeleteSnapshot(call) { + const driver = this; + const zb = this.getZetabyte(); + + const snapshot_id = call.request.snapshot_id; + + if (!snapshot_id) { + throw new GrpcError( + grpc.status.INVALID_ARGUMENT, + `snapshot_id is required` + ); + } + + const detachedSnapshot = !zb.helpers.isZfsSnapshot(snapshot_id); + let datasetParentName; + + if (detachedSnapshot) { + datasetParentName = this.getDetachedSnapshotParentDatasetName(); + } else { + datasetParentName = this.getVolumeParentDatasetName(); + } + + if (!datasetParentName) { + throw new GrpcError( + grpc.status.FAILED_PRECONDITION, + `invalid configuration: missing datasetParentName` + ); + } + + const fullSnapshotName = datasetParentName + "/" + snapshot_id; + + driver.ctx.logger.verbose("deleting snapshot: %s", fullSnapshotName); + + try { + await zb.zfs.destroy(fullSnapshotName, { + recurse: true, + force: true, + defer: zb.helpers.isZfsSnapshot(snapshot_id) // only defer when snapshot + }); + } catch (err) { + if (err.toString().includes("snapshot has dependent clones")) { + throw new GrpcError( + grpc.status.FAILED_PRECONDITION, + "snapshot has dependent clones" + ); + } + + throw err; + } + + // cleanup parent dataset if possible + if (detachedSnapshot) { + let containerDataset = zb.helpers.extractParentDatasetName( + fullSnapshotName + ); + try { + await this.removeSnapshotsFromDatatset(containerDataset); + await 
zb.zfs.destroy(containerDataset); + } catch (err) { + if (!err.toString().includes("filesystem has children")) { + throw err; + } + } + } + + return {}; + } + + /** + * + * @param {*} call + */ + async ValidateVolumeCapabilities(call) { + const driver = this; + const result = this.assertCapabilities(call.request.volume_capabilities); + + if (result.valid !== true) { + return { message: result.message }; + } + + return { + confirmed: { + volume_context: call.request.volume_context, + volume_capabilities: call.request.volume_capabilities, // TODO: this is a bit crude, should return *ALL* capabilities, not just what was requested + parameters: call.request.parameters + } + }; + } +} + +module.exports.ControllerZfsSshBaseDriver = ControllerZfsSshBaseDriver; diff --git a/src/driver/freenas/http/index.js b/src/driver/freenas/http/index.js new file mode 100644 index 0000000..bfbb608 --- /dev/null +++ b/src/driver/freenas/http/index.js @@ -0,0 +1,163 @@ +const request = require("request"); +const URI = require("uri-js"); +const USER_AGENT = "democratic-csi-driver"; + +class Client { + constructor(options = {}) { + this.options = options; + this.logger = console; + + // default to v1.0 for now + if (!this.options.apiVersion) { + this.options.apiVersion = 1; + } + } + getBaseURL() { + const server = this.options; + const options = { + scheme: server.protocol, + host: server.host, + port: server.port, + //userinfo: server.username + ":" + server.password, + path: server.apiVersion == 1 ? 
"/api/v1.0" : "/api/v2.0" + }; + return URI.serialize(options); + } + + setApiVersion(apiVersion) { + this.options.apiVersion = apiVersion; + } + + getApiVersion() { + return this.options.apiVersion; + } + + log_repsonse(error, response, body, options) { + this.logger.debug("FREENAS HTTP REQUEST: " + JSON.stringify(options)); + this.logger.debug("FREENAS HTTP ERROR: " + error); + this.logger.debug("FREENAS HTTP STATUS: " + response.statusCode); + this.logger.debug( + "FREENAS HTTP HEADERS: " + JSON.stringify(response.headers) + ); + this.logger.debug("FREENAS HTTP BODY: " + JSON.stringify(body)); + } + + async get(endpoint, data) { + const client = this; + // curl -X GET "http://bitness.lan/api/v2.0/core/ping" -H "accept: */*" + if (this.options.apiVersion == 1 && !endpoint.endsWith("/")) { + endpoint += "/"; + } + + return new Promise((resolve, reject) => { + const options = { + method: "GET", + url: this.getBaseURL() + endpoint, + headers: { + Accept: "*/*", + "User-Agent": USER_AGENT + }, + json: true, + qs: data + }; + request(options, function(err, res, body) { + client.log_repsonse(...arguments, options); + if (err) { + reject(err); + } + + resolve(res); + }).auth(client.options.username, client.options.password); + }); + } + + async post(endpoint, data) { + const client = this; + // curl -X POST "http://bitness.lan/api/v2.0/core/get_methods" -H "accept: */*" -H "Content-Type: application/json" -d "\"string\"" + if (this.options.apiVersion == 1 && !endpoint.endsWith("/")) { + endpoint += "/"; + } + + return new Promise((resolve, reject) => { + const options = { + method: "POST", + url: this.getBaseURL() + endpoint, + headers: { + Accept: "*/*", + "User-Agent": USER_AGENT + }, + json: true, + body: data + }; + request(options, function(err, res, body) { + client.log_repsonse(...arguments, options); + if (err) { + reject(err); + } + + resolve(res); + }).auth(client.options.username, client.options.password); + }); + } + + async put(endpoint, data) { + const 
client = this; + // curl -X PUT "http://bitness.lan/api/v2.0/sharing/smb/id/1" -H "accept: */*" -H "Content-Type: application/json" -d "{\"path\":\"string\",\"home\":true,\"name\":\"string\",\"comment\":\"string\",\"ro\":true,\"browsable\":true,\"timemachine\":true,\"recyclebin\":true,\"showhiddenfiles\":true,\"guestok\":true,\"guestonly\":true,\"abe\":true,\"hostsallow\":[null],\"hostsdeny\":[null],\"vfsobjects\":[null],\"storage_task\":0,\"auxsmbconf\":\"string\",\"default_permissions\":true}" + if (this.options.apiVersion == 1 && !endpoint.endsWith("/")) { + endpoint += "/"; + } + + return new Promise((resolve, reject) => { + const options = { + method: "PUT", + url: this.getBaseURL() + endpoint, + headers: { + Accept: "*/*", + "User-Agent": USER_AGENT + }, + json: true, + body: data + }; + request(options, function(err, res, body) { + client.log_repsonse(...arguments, options); + if (err) { + reject(err); + } + + resolve(res); + }).auth(client.options.username, client.options.password); + }); + } + + //Unauthorized + async delete(endpoint, data) { + const client = this; + // curl -X DELETE "http://bitness.lan/api/v2.0/sharing/smb/id/1" -H "accept: */*" -H "Content-Type: application/json" -d "{}" + if (this.options.apiVersion == 1 && !endpoint.endsWith("/")) { + endpoint += "/"; + } + + return new Promise((resolve, reject) => { + const options = { + method: "DELETE", + url: this.getBaseURL() + endpoint, + headers: { + Accept: "*/*", + "User-Agent": USER_AGENT + }, + json: true, + body: data + }; + request(options, function(err, res, body) { + client.log_repsonse(...arguments, options); + if (err) { + reject(err); + } + + resolve(res); + }).auth(client.options.username, client.options.password); + }); + } +} + +module.exports.Client = Client; diff --git a/src/driver/freenas/index.js b/src/driver/freenas/index.js new file mode 100644 index 0000000..514c072 --- /dev/null +++ b/src/driver/freenas/index.js @@ -0,0 +1,1027 @@ +const grpc = require("grpc"); +const { 
ControllerZfsSshBaseDriver } = require("../controller-zfs-ssh"); +const { GrpcError } = require("../../utils/grpc"); +const HttpClient = require("./http").Client; + +// freenas properties +const FREENAS_NFS_SHARE_PROPERTY_NAME = "democratic-csi:freenas_nfs_share_id"; +const FREENAS_ISCSI_TARGET_ID_PROPERTY_NAME = + "democratic-csi:freenas_iscsi_target_id"; +const FREENAS_ISCSI_EXTENT_ID_PROPERTY_NAME = + "democratic-csi:freenas_iscsi_extent_id"; +const FREENAS_ISCSI_TARGETTOEXTENT_ID_PROPERTY_NAME = + "democratic-csi:freenas_iscsi_targettoextent_id"; + +class FreeNASDriver extends ControllerZfsSshBaseDriver { + /** + * cannot make this a storage class parameter as storage class/etc context is *not* sent + * into various calls such as GetControllerCapabilities etc + */ + getDriverZfsResourceType() { + switch (this.ctx.args.driver) { + case "freenas-nfs": + return "filesystem"; + case "freenas-iscsi": + return "volume"; + default: + throw new Error("unknown driver: " + this.ctx.args.driver); + } + } + + getHttpClient() { + const client = new HttpClient(this.options.httpConnection); + client.logger = this.ctx.logger; + return client; + } + + getDriverShareType() { + switch (this.ctx.args.driver) { + case "freenas-nfs": + return "nfs"; + case "freenas-iscsi": + return "iscsi"; + default: + throw new Error("unknown driver: " + this.ctx.args.driver); + } + } + + async findResourceByProperties(endpoint, match) { + if (!match || Object.keys(match).length < 1) { + return; + } + const httpClient = this.getHttpClient(); + let target; + let page = 0; + + // loop and find target + let queryParams = {}; + // TODO: relax this using getSystemVersion perhaps + // https://jira.ixsystems.com/browse/NAS-103916 + if (httpClient.getApiVersion() == 1) { + queryParams.limit = 100; + queryParams.offset = 0; + } + + while (!target) { + //Content-Range: items 0-2/3 (full set) + //Content-Range: items 0--1/3 (invalid offset) + if (queryParams.hasOwnProperty("offset")) { + queryParams.offset = 
queryParams.limit * page; + } + + let response = await httpClient.get(endpoint, queryParams); + + if (response.statusCode == 200) { + if (response.body.length < 1) { + break; + } + response.body.some(i => { + let isMatch = true; + for (let property in match) { + if (match[property] != i[property]) { + isMatch = false; + break; + } + } + + if (isMatch) { + target = i; + return true; + } + + return false; + }); + } else { + throw new Error( + "FreeNAS http error - code: " + + response.statusCode + + " body: " + + JSON.stringify(response.body) + ); + } + page++; + } + + return target; + } + + /** + * should create any necessary share resources + * should set the SHARE_VOLUME_CONTEXT_PROPERTY_NAME propery + * + * @param {*} datasetName + */ + async createShare(call, datasetName) { + const driverShareType = this.getDriverShareType(); + const httpClient = this.getHttpClient(); + const apiVersion = httpClient.getApiVersion(); + const zb = this.getZetabyte(); + + let properties; + let response; + let share = {}; + + switch (driverShareType) { + case "nfs": + properties = await zb.zfs.get(datasetName, [ + "mountpoint", + FREENAS_NFS_SHARE_PROPERTY_NAME + ]); + properties = properties[datasetName]; + this.ctx.logger.debug("zfs props data: %j", properties); + + // create nfs share + if ( + !zb.helpers.isPropertyValueSet( + properties[FREENAS_NFS_SHARE_PROPERTY_NAME].value + ) + ) { + switch (apiVersion) { + case 1: + case 2: + switch (apiVersion) { + case 1: + share = { + nfs_paths: [properties.mountpoint.value], + nfs_comment: `democratic-csi (${this.options.csiName}): ${datasetName}`, + nfs_network: this.options.nfs.shareAllowedNetworks.join( + "," + ), + nfs_hosts: this.options.nfs.shareAllowedHosts.join(","), + nfs_alldirs: this.options.nfs.shareAlldirs, + nfs_ro: false, + nfs_quiet: false, + nfs_maproot_user: this.options.nfs.shareMaprootUser, + nfs_maproot_group: this.options.nfs.shareMaprootGroup, + nfs_mapall_user: this.options.nfs.shareMapallUser, + nfs_mapall_group: 
this.options.nfs.shareMapallGroup, + nfs_security: [] + }; + break; + case 2: + share = { + paths: [properties.mountpoint.value], + comment: `democratic-csi (${this.options.csiName}): ${datasetName}`, + networks: this.options.nfs.shareAllowedNetworks, + hosts: this.options.nfs.shareAllowedHosts, + alldirs: this.options.nfs.shareAlldirs, + ro: false, + quiet: false, + maproot_user: this.options.nfs.shareMaprootUser, + maproot_group: this.options.nfs.shareMaprootGroup, + mapall_user: this.options.nfs.shareMapallUser, + mapall_group: this.options.nfs.shareMapallGroup, + security: [] + }; + break; + } + + response = await httpClient.post("/sharing/nfs", share); + + /** + * v1 = 201 + * v2 = 200 + */ + if ([200, 201].includes(response.statusCode)) { + //set zfs property + await zb.zfs.set(datasetName, { + [FREENAS_NFS_SHARE_PROPERTY_NAME]: response.body.id + }); + } else { + /** + * v1 = 409 + * v2 = 422 + */ + if ( + [409, 422].includes(response.statusCode) && + JSON.stringify(response.body).includes( + "You can't share same filesystem with all hosts twice." 
+ ) + ) { + // move along + } else { + throw new GrpcError( + grpc.status.UNKNOWN, + `received error creating nfs share - code: ${response.statusCode} body: ${response.body}` + ); + } + } + + let volume_context = { + node_attach_driver: "nfs", + server: this.options.nfs.shareHost, + share: properties.mountpoint.value + }; + return volume_context; + + default: + throw new GrpcError( + grpc.status.FAILED_PRECONDITION, + `invalid configuration: unknown apiVersion ${apiVersion}` + ); + } + } else { + let volume_context = { + node_attach_driver: "nfs", + server: this.options.nfs.shareHost, + share: properties.mountpoint.value + }; + return volume_context; + } + break; + case "iscsi": + properties = await zb.zfs.get(datasetName, [ + FREENAS_ISCSI_TARGET_ID_PROPERTY_NAME, + FREENAS_ISCSI_EXTENT_ID_PROPERTY_NAME, + FREENAS_ISCSI_TARGETTOEXTENT_ID_PROPERTY_NAME + ]); + properties = properties[datasetName]; + this.ctx.logger.debug("zfs props data: %j", properties); + + let basename; + let iscsiName = zb.helpers.extractLeafName(datasetName); + if (this.options.iscsi.namePrefix) { + iscsiName = this.options.iscsi.namePrefix + iscsiName; + } + + if (this.options.iscsi.nameSuffix) { + iscsiName += this.options.iscsi.nameSuffix; + } + + iscsiName = iscsiName.toLowerCase(); + + let extentDiskName = "zvol/" + datasetName; + + /** + * limit is a FreeBSD limitation + * https://www.ixsystems.com/documentation/freenas/11.2-U5/storage.html#zfs-zvol-config-opts-tab + */ + if (extentDiskName.length > 63) { + throw new GrpcError( + grpc.status.FAILED_PRECONDITION, + `extent disk name cannot exceed 63 characters: ${extentDiskName}` + ); + } + + this.ctx.logger.info( + "FreeNAS creating iscsi assets with name: " + iscsiName + ); + + const extentInsecureTpc = this.options.iscsi.hasOwnProperty( + "extentInsecureTpc" + ) + ? this.options.iscsi.extentInsecureTpc + : true; + + const extentXenCompat = this.options.iscsi.hasOwnProperty( + "extentXenCompat" + ) + ? 
this.options.iscsi.extentXenCompat + : false; + + const extentBlocksize = this.options.iscsi.hasOwnProperty( + "extentBlocksize" + ) + ? this.options.iscsi.extentBlocksize + : 512; + + const extentDisablePhysicalBlocksize = this.options.iscsi.hasOwnProperty( + "extentDisablePhysicalBlocksize" + ) + ? this.options.iscsi.extentDisablePhysicalBlocksize + : true; + + const extentRpm = this.options.iscsi.hasOwnProperty("extentRpm") + ? this.options.iscsi.extentRpm + : "SSD"; + + const extentAvailThreshold = this.options.iscsi.hasOwnProperty( + "extentAvailThreshold" + ) + ? this.options.iscsi.extentAvailThreshold + : null; + + switch (apiVersion) { + case 1: { + response = await httpClient.get( + "/services/iscsi/globalconfiguration" + ); + if (response.statusCode != 200) { + throw new GrpcError( + grpc.status.UNKNOWN, + `error getting iscsi configuration - code: ${ + response.statusCode + } body: ${JSON.stringify(response.body)}` + ); + } + basename = response.body.iscsi_basename; + this.ctx.logger.verbose("FreeNAS ISCSI BASENAME: " + basename); + + // create target + let target = { + iscsi_target_name: iscsiName, + iscsi_target_alias: "" + }; + + response = await httpClient.post("/services/iscsi/target", target); + + // 409 if invalid + if (response.statusCode != 201) { + target = null; + if ( + response.statusCode == 409 && + JSON.stringify(response.body).includes( + "Target name already exists" + ) + ) { + target = await this.findResourceByProperties( + "/services/iscsi/target", + { + iscsi_target_name: iscsiName + } + ); + } else { + throw new GrpcError( + grpc.status.UNKNOWN, + `received error creating iscsi target - code: ${ + response.statusCode + } body: ${JSON.stringify(response.body)}` + ); + } + } else { + target = response.body; + } + + if (!target) { + throw new GrpcError( + grpc.status.UNKNOWN, + `unknown error creating iscsi target` + ); + } + + this.ctx.logger.verbose("FreeNAS ISCSI TARGET: %j", target); + + // set target.id on zvol + await 
zb.zfs.set(datasetName, {
+ [FREENAS_ISCSI_TARGET_ID_PROPERTY_NAME]: target.id
+ });
+
+ // create targetgroup(s)
+ // targetgroups do have IDs
+ for (let targetGroupConfig of this.options.iscsi.targetGroups) {
+ let targetGroup = {
+ iscsi_target: target.id,
+ iscsi_target_authgroup: targetGroupConfig.targetGroupAuthGroup,
+ iscsi_target_authtype: targetGroupConfig.targetGroupAuthType
+ ? targetGroupConfig.targetGroupAuthType
+ : "None",
+ iscsi_target_portalgroup:
+ targetGroupConfig.targetGroupPortalGroup,
+ iscsi_target_initiatorgroup:
+ targetGroupConfig.targetGroupInitiatorGroup,
+ iscsi_target_initialdigest: "Auto"
+ };
+ response = await httpClient.post(
+ "/services/iscsi/targetgroup",
+ targetGroup
+ );
+
+ // 409 if invalid
+ if (response.statusCode != 201) {
+ targetGroup = null;
+ /**
+ * 404 gets returned with an unable to process response when the DB is corrupted (has invalid entries in essence)
+ *
+ * To resolve properly the DB should be cleaned up
+ * /usr/local/etc/rc.d/django stop
+ * /usr/local/etc/rc.d/nginx stop
+ * sqlite3 /data/freenas-v1.db
+ *
+ * // this deletes everything, probably not what you want
+ * // should have a better query to only find entries where associated assets no longer exist
+ * DELETE from services_iscsitargetgroups;
+ *
+ * /usr/local/etc/rc.d/django restart
+ * /usr/local/etc/rc.d/nginx restart
+ */
+ if (
+ response.statusCode == 404 ||
+ (response.statusCode == 409 &&
+ JSON.stringify(response.body).includes(
+ "cannot be duplicated on a target"
+ ))
+ ) {
+ targetGroup = await this.findResourceByProperties(
+ "/services/iscsi/targetgroup",
+ {
+ iscsi_target: target.id,
+ iscsi_target_portalgroup:
+ targetGroupConfig.targetGroupPortalGroup,
+ iscsi_target_initiatorgroup:
+ targetGroupConfig.targetGroupInitiatorGroup
+ }
+ );
+ } else {
+ throw new GrpcError(
+ grpc.status.UNKNOWN,
+ `received error creating iscsi targetgroup - code: ${
+ response.statusCode
+ } body: ${JSON.stringify(response.body)}`
+ );
+ }
+ } 
else { + targetGroup = response.body; + } + + if (!targetGroup) { + throw new GrpcError( + grpc.status.UNKNOWN, + `unknown error creating iscsi targetgroup` + ); + } + + this.ctx.logger.verbose( + "FreeNAS ISCSI TARGET_GROUP: %j", + targetGroup + ); + } + + let extent = { + iscsi_target_extent_comment: "", + iscsi_target_extent_type: "Disk", // Disk/File, after save Disk becomes "ZVOL" + iscsi_target_extent_name: iscsiName, + iscsi_target_extent_insecure_tpc: extentInsecureTpc, + //iscsi_target_extent_naa: "0x3822690834aae6c5", + iscsi_target_extent_disk: extentDiskName, + iscsi_target_extent_xen: extentXenCompat, + iscsi_target_extent_avail_threshold: extentAvailThreshold, + iscsi_target_extent_blocksize: Number(extentBlocksize), + iscsi_target_extent_pblocksize: extentDisablePhysicalBlocksize, + iscsi_target_extent_rpm: isNaN(Number(extentRpm)) + ? "SSD" + : Number(extentRpm), + iscsi_target_extent_ro: false + }; + response = await httpClient.post("/services/iscsi/extent", extent); + + // 409 if invalid + if (response.statusCode != 201) { + extent = null; + if ( + response.statusCode == 409 && + JSON.stringify(response.body).includes( + "Extent name must be unique" + ) + ) { + extent = await this.findResourceByProperties( + "/services/iscsi/extent", + { iscsi_target_extent_name: iscsiName } + ); + } else { + throw new GrpcError( + grpc.status.UNKNOWN, + `received error creating iscsi extent - code: ${ + response.statusCode + } body: ${JSON.stringify(response.body)}` + ); + } + } else { + extent = response.body; + } + + if (!extent) { + throw new GrpcError( + grpc.status.UNKNOWN, + `unknown error creating iscsi extent` + ); + } + this.ctx.logger.verbose("FreeNAS ISCSI EXTENT: %j", extent); + + await zb.zfs.set(datasetName, { + [FREENAS_ISCSI_EXTENT_ID_PROPERTY_NAME]: extent.id + }); + + // create targettoextent + let targetToExtent = { + iscsi_target: target.id, + iscsi_extent: extent.id, + iscsi_lunid: 0 + }; + response = await httpClient.post( + 
"/services/iscsi/targettoextent", + targetToExtent + ); + + // 409 if invalid + if (response.statusCode != 201) { + targetToExtent = null; + + // LUN ID is already being used for this target. + // Extent is already in this target. + if ( + response.statusCode == 409 && + JSON.stringify(response.body).includes( + "Extent is already in this target." + ) && + JSON.stringify(response.body).includes( + "LUN ID is already being used for this target." + ) + ) { + targetToExtent = await this.findResourceByProperties( + "/services/iscsi/targettoextent", + { + iscsi_target: target.id, + iscsi_extent: extent.id, + iscsi_lunid: 0 + } + ); + } else { + throw new GrpcError( + grpc.status.UNKNOWN, + `received error creating iscsi targettoextent - code: ${ + response.statusCode + } body: ${JSON.stringify(response.body)}` + ); + } + } else { + targetToExtent = response.body; + } + + if (!targetToExtent) { + throw new GrpcError( + grpc.status.UNKNOWN, + `unknown error creating iscsi targettoextent` + ); + } + this.ctx.logger.verbose( + "FreeNAS ISCSI TARGET_TO_EXTENT: %j", + targetToExtent + ); + + await zb.zfs.set(datasetName, { + [FREENAS_ISCSI_TARGETTOEXTENT_ID_PROPERTY_NAME]: targetToExtent.id + }); + + break; + } + case 2: + response = await httpClient.get("/iscsi/global"); + if (response.statusCode != 200) { + throw new GrpcError( + grpc.status.UNKNOWN, + `error getting iscsi configuration - code: ${ + response.statusCode + } body: ${JSON.stringify(response.body)}` + ); + } + basename = response.body.basename; + this.ctx.logger.verbose("FreeNAS ISCSI BASENAME: " + basename); + + // create target and targetgroup + //let targetId; + let targetGroups = []; + for (let targetGroupConfig of this.options.iscsi.targetGroups) { + targetGroups.push({ + portal: targetGroupConfig.targetGroupPortalGroup, + initiator: targetGroupConfig.targetGroupInitiatorGroup, + auth: + targetGroupConfig.targetGroupAuthGroup > 0 + ? 
targetGroupConfig.targetGroupAuthGroup + : null, + authmethod: + targetGroupConfig.targetGroupAuthType.length > 0 + ? targetGroupConfig.targetGroupAuthType + .toUpperCase() + .replace(" ", "_") + : "NONE" + }); + } + let target = { + name: iscsiName, + alias: null, // cannot send "" error: handler error - driver: FreeNASDriver method: CreateVolume error: {"name":"GrpcError","code":2,"message":"received error creating iscsi target - code: 422 body: {\"iscsi_target_create.alias\":[{\"message\":\"Alias already exists\",\"errno\":22}]}"} + mode: "ISCSI", + groups: targetGroups + }; + + response = await httpClient.post("/iscsi/target", target); + + // 409 if invalid + if (response.statusCode != 200) { + target = null; + if ( + response.statusCode == 422 && + JSON.stringify(response.body).includes( + "Target name already exists" + ) + ) { + target = await this.findResourceByProperties("/iscsi/target", { + name: iscsiName + }); + } else { + throw new GrpcError( + grpc.status.UNKNOWN, + `received error creating iscsi target - code: ${ + response.statusCode + } body: ${JSON.stringify(response.body)}` + ); + } + } else { + target = response.body; + } + + if (!target) { + throw new GrpcError( + grpc.status.UNKNOWN, + `unknown error creating iscsi target` + ); + } + + this.ctx.logger.verbose("FreeNAS ISCSI TARGET: %j", target); + + // set target.id on zvol + await zb.zfs.set(datasetName, { + [FREENAS_ISCSI_TARGET_ID_PROPERTY_NAME]: target.id + }); + + let extent = { + comment: "", + type: "DISK", // Disk/File, after save Disk becomes "ZVOL" + name: iscsiName, + //iscsi_target_extent_naa: "0x3822690834aae6c5", + disk: extentDiskName, + insecure_tpc: extentInsecureTpc, + xen: extentXenCompat, + avail_threshold: extentAvailThreshold, + blocksize: Number(extentBlocksize), + pblocksize: extentDisablePhysicalBlocksize, + rpm: "" + extentRpm, // should be a string + ro: false + }; + + response = await httpClient.post("/iscsi/extent", extent); + + // 409 if invalid + if 
(response.statusCode != 200) { + extent = null; + if ( + response.statusCode == 422 && + JSON.stringify(response.body).includes( + "Extent name must be unique" + ) + ) { + extent = await this.findResourceByProperties("/iscsi/extent", { + name: iscsiName + }); + } else { + throw new GrpcError( + grpc.status.UNKNOWN, + `received error creating iscsi extent - code: ${ + response.statusCode + } body: ${JSON.stringify(response.body)}` + ); + } + } else { + extent = response.body; + } + + if (!extent) { + throw new GrpcError( + grpc.status.UNKNOWN, + `unknown error creating iscsi extent` + ); + } + this.ctx.logger.verbose("FreeNAS ISCSI EXTENT: %j", extent); + + await zb.zfs.set(datasetName, { + [FREENAS_ISCSI_EXTENT_ID_PROPERTY_NAME]: extent.id + }); + + // create targettoextent + let targetToExtent = { + target: target.id, + extent: extent.id, + lunid: 0 + }; + response = await httpClient.post( + "/iscsi/targetextent", + targetToExtent + ); + + if (response.statusCode != 200) { + targetToExtent = null; + + // LUN ID is already being used for this target. + // Extent is already in this target. + if ( + response.statusCode == 422 && + JSON.stringify(response.body).includes( + "Extent is already in this target." + ) && + JSON.stringify(response.body).includes( + "LUN ID is already being used for this target." 
+ ) + ) { + targetToExtent = await this.findResourceByProperties( + "/iscsi/targetextent", + { + target: target.id, + extent: extent.id, + lunid: 0 + } + ); + } else { + throw new GrpcError( + grpc.status.UNKNOWN, + `received error creating iscsi targetextent - code: ${ + response.statusCode + } body: ${JSON.stringify(response.body)}` + ); + } + } else { + targetToExtent = response.body; + } + + if (!targetToExtent) { + throw new GrpcError( + grpc.status.UNKNOWN, + `unknown error creating iscsi targetextent` + ); + } + this.ctx.logger.verbose( + "FreeNAS ISCSI TARGET_TO_EXTENT: %j", + targetToExtent + ); + + await zb.zfs.set(datasetName, { + [FREENAS_ISCSI_TARGETTOEXTENT_ID_PROPERTY_NAME]: targetToExtent.id + }); + + break; + default: + throw new GrpcError( + grpc.status.FAILED_PRECONDITION, + `invalid configuration: unknown apiVersion ${apiVersion}` + ); + } + + // iqn = target + let iqn = basename + ":" + iscsiName; + this.ctx.logger.info("FreeNAS iqn: " + iqn); + + // iscsiadm -m discovery -t st -p 172.21.26.81 + // iscsiadm -m node -T iqn.2011-03.lan.bitness.istgt:test -p bitness.lan -l + + // FROM driver config? 
no, node attachment should have everything required to remain independent + // portal + // portals + // interface + // chap discovery + // chap session + + // FROM context + // iqn + // lun + + let volume_context = { + node_attach_driver: "iscsi", + portal: this.options.iscsi.targetPortal, + portals: this.options.iscsi.targetPortals.join(","), + interface: this.options.iscsi.interface, + //chapDiscoveryEnabled: this.options.iscsi.chapDiscoveryEnabled, + //chapSessionEnabled: this.options.iscsi.chapSessionEnabled, + iqn: iqn, + lun: 0 + }; + return volume_context; + + default: + throw new GrpcError( + grpc.status.FAILED_PRECONDITION, + `invalid configuration: unknown driverShareType ${driverShareType}` + ); + } + } + + async deleteShare(call, datasetName) { + const driverShareType = this.getDriverShareType(); + const httpClient = this.getHttpClient(); + const apiVersion = httpClient.getApiVersion(); + const zb = this.getZetabyte(); + + let properties; + let response; + let endpoint; + + switch (driverShareType) { + case "nfs": + try { + properties = await zb.zfs.get(datasetName, [ + FREENAS_NFS_SHARE_PROPERTY_NAME + ]); + } catch (err) { + if (err.toString().includes("dataset does not exist")) { + return; + } + throw err; + } + properties = properties[datasetName]; + this.ctx.logger.debug("zfs props data: %j", properties); + + let shareId = properties[FREENAS_NFS_SHARE_PROPERTY_NAME].value; + + // remove nfs share + if ( + properties && + properties[FREENAS_NFS_SHARE_PROPERTY_NAME] && + properties[FREENAS_NFS_SHARE_PROPERTY_NAME].value != "-" + ) { + switch (apiVersion) { + case 1: + case 2: + endpoint = "/sharing/nfs/"; + if (apiVersion == 2) { + endpoint += "id/"; + } + endpoint += shareId; + + response = await httpClient.get(endpoint); + + // assume share is gone for now + if ([500].includes(response.statusCode)) { + } else { + response = await httpClient.delete(endpoint); + + // returns a 500 if does not exist + // v1 = 204 + // v2 = 200 + if (![200, 
204].includes(response.statusCode)) {
+ throw new GrpcError(
+ grpc.status.UNKNOWN,
+ `received error deleting nfs share - share: ${shareId} code: ${
+ response.statusCode
+ } body: ${JSON.stringify(response.body)}`
+ );
+ }
+ }
+ break;
+ default:
+ throw new GrpcError(
+ grpc.status.FAILED_PRECONDITION,
+ `invalid configuration: unknown apiVersion ${apiVersion}`
+ );
+ }
+ }
+ break;
+ case "iscsi":
+ // Delete target
+ // NOTE: deleting a target inherently deletes associated targetgroup(s) and targettoextent(s)
+
+ // Delete extent
+ try {
+ properties = await zb.zfs.get(datasetName, [
+ FREENAS_ISCSI_TARGET_ID_PROPERTY_NAME,
+ FREENAS_ISCSI_EXTENT_ID_PROPERTY_NAME,
+ FREENAS_ISCSI_TARGETTOEXTENT_ID_PROPERTY_NAME
+ ]);
+ } catch (err) {
+ if (err.toString().includes("dataset does not exist")) {
+ return;
+ }
+ throw err;
+ }
+
+ properties = properties[datasetName];
+ this.ctx.logger.debug("zfs props data: %j", properties);
+
+ let targetId = properties[FREENAS_ISCSI_TARGET_ID_PROPERTY_NAME].value;
+ let extentId = properties[FREENAS_ISCSI_EXTENT_ID_PROPERTY_NAME].value;
+
+ switch (apiVersion) {
+ case 1:
+ case 2:
+ // https://jira.ixsystems.com/browse/NAS-103952
+
+ // v1 - /services/iscsi/target/{id}/
+ // v2 - /iscsi/target/id/{id}
+ endpoint = "";
+ if (apiVersion == 1) {
+ endpoint += "/services";
+ }
+ endpoint += "/iscsi/target/";
+ if (apiVersion == 2) {
+ endpoint += "id/";
+ }
+ endpoint += targetId;
+ response = await httpClient.get(endpoint);
+
+ // assume is gone for now
+ if ([500].includes(response.statusCode)) {
+ } else {
+ response = await httpClient.delete(endpoint);
+ if (![200, 204].includes(response.statusCode)) {
+ throw new GrpcError(
+ grpc.status.UNKNOWN,
+ `received error deleting iscsi target - extent: ${targetId} code: ${
+ response.statusCode
+ } body: ${JSON.stringify(response.body)}`
+ );
+ }
+ }
+
+ // v1 - /services/iscsi/extent/{id}/
+ // v2 - /iscsi/extent/id/{id}
+ if (apiVersion == 1) {
+ endpoint = 
"/services/iscsi/extent/"; + } else { + endpoint = "/iscsi/extent/id/"; + } + endpoint += extentId; + response = await httpClient.get(endpoint); + + // assume is gone for now + if ([500].includes(response.statusCode)) { + } else { + response = await httpClient.delete(endpoint); + if (![200, 204].includes(response.statusCode)) { + throw new GrpcError( + grpc.status.UNKNOWN, + `received error deleting iscsi extent - extent: ${extentId} code: ${ + response.statusCode + } body: ${JSON.stringify(response.body)}` + ); + } + } + break; + default: + throw new GrpcError( + grpc.status.FAILED_PRECONDITION, + `invalid configuration: unknown apiVersion ${apiVersion}` + ); + } + break; + default: + throw new GrpcError( + grpc.status.FAILED_PRECONDITION, + `invalid configuration: unknown driverShareType ${driverShareType}` + ); + } + } + + async expandVolume(call, datasetName) { + const driverShareType = this.getDriverShareType(); + const sshClient = this.getSshClient(); + + switch (driverShareType) { + case "iscsi": + this.ctx.logger.verbose("FreeNAS reloading ctld"); + await sshClient.exec( + sshClient.buildCommand("/etc/rc.d/ctld", ["reload"]) + ); + break; + } + } + + async getApiVersion() { + const systemVersion = await this.getSystemVersion(); + + return 1; + } + + async getSystemVersion() { + const httpClient = this.getHttpClient(); + const endpoint = "/system/version/"; + let response; + const startApiVersion = httpClient.getApiVersion(); + const versionInfo = {}; + + httpClient.setApiVersion(2); + /** + * FreeNAS-11.2-U5 + */ + try { + response = await httpClient.get(endpoint); + if (response.statusCode == 200) { + versionInfo.v2 = response.body; + } + } catch (e) {} + + httpClient.setApiVersion(1); + /** + * {"fullversion": "FreeNAS-9.3-STABLE-201503200528", "name": "FreeNAS", "version": "9.3"} + * {"fullversion": "FreeNAS-11.2-U5 (c129415c52)", "name": "FreeNAS", "version": ""} + */ + try { + response = await httpClient.get(endpoint); + if (response.statusCode == 200) 
{ + versionInfo.v1 = response.body; + } + } catch (e) {} + + // reset apiVersion + httpClient.setApiVersion(startApiVersion); + + return versionInfo; + } +} + +module.exports.FreeNASDriver = FreeNASDriver; diff --git a/src/driver/index.js b/src/driver/index.js new file mode 100644 index 0000000..f2b4074 --- /dev/null +++ b/src/driver/index.js @@ -0,0 +1,798 @@ +const os = require("os"); +const path = require("path"); +const grpc = require("grpc"); +const fs = require("fs"); +const { GrpcError } = require("../utils/grpc"); +const { Mount } = require("../utils/mount"); +const { Filesystem } = require("../utils/filesystem"); +const { ISCSI } = require("../utils/iscsi"); +const sleep = require("../utils/general").sleep; + +/** + * common code shared between all drivers + * this is **NOT** meant to work as a proxy + * for the grpc calls meaning, it should not + * also operate as a facade handling directly + * the requests to the platform + */ +class CsiBaseDriver { + constructor(ctx, options) { + this.ctx = ctx; + this.options = options; + } + + async GetPluginInfo(call) { + return { + name: this.ctx.args.csiName, + vendor_version: this.ctx.args.version + }; + } + + async GetPluginCapabilities(call) { + let capabilities; + const response = { + capabilities: [] + }; + + //UNKNOWN = 0; + // CONTROLLER_SERVICE indicates that the Plugin provides RPCs for + // the ControllerService. Plugins SHOULD provide this capability. + // In rare cases certain plugins MAY wish to omit the + // ControllerService entirely from their implementation, but such + // SHOULD NOT be the common case. + // The presence of this capability determines whether the CO will + // attempt to invoke the REQUIRED ControllerService RPCs, as well + // as specific RPCs as indicated by ControllerGetCapabilities. + //CONTROLLER_SERVICE = 1; + + // VOLUME_ACCESSIBILITY_CONSTRAINTS indicates that the volumes for + // this plugin MAY NOT be equally accessible by all nodes in the + // cluster. 
The CO MUST use the topology information returned by + // CreateVolumeRequest along with the topology information + // returned by NodeGetInfo to ensure that a given volume is + // accessible from a given node when scheduling workloads. + //VOLUME_ACCESSIBILITY_CONSTRAINTS = 2; + capabilities = this.options.service.identity.capabilities.service || [ + "UNKNOWN" + ]; + + capabilities.forEach(item => { + response.capabilities.push({ + service: { type: item } + }); + }); + + //UNKNOWN = 0; + // ONLINE indicates that volumes may be expanded when published to + // a node. When a Plugin implements this capability it MUST + // implement either the EXPAND_VOLUME controller capability or the + // EXPAND_VOLUME node capability or both. When a plugin supports + // ONLINE volume expansion and also has the EXPAND_VOLUME + // controller capability then the plugin MUST support expansion of + // volumes currently published and available on a node. When a + // plugin supports ONLINE volume expansion and also has the + // EXPAND_VOLUME node capability then the plugin MAY support + // expansion of node-published volume via NodeExpandVolume. + // + // Example 1: Given a shared filesystem volume (e.g. GlusterFs), + // the Plugin may set the ONLINE volume expansion capability and + // implement ControllerExpandVolume but not NodeExpandVolume. + // + // Example 2: Given a block storage volume type (e.g. EBS), the + // Plugin may set the ONLINE volume expansion capability and + // implement both ControllerExpandVolume and NodeExpandVolume. + // + // Example 3: Given a Plugin that supports volume expansion only + // upon a node, the Plugin may set the ONLINE volume + // expansion capability and implement NodeExpandVolume but not + // ControllerExpandVolume. + //ONLINE = 1; + + // OFFLINE indicates that volumes currently published and + // available on a node SHALL NOT be expanded via + // ControllerExpandVolume. 
When a plugin supports OFFLINE volume + // expansion it MUST implement either the EXPAND_VOLUME controller + // capability or both the EXPAND_VOLUME controller capability and + // the EXPAND_VOLUME node capability. + // + // Example 1: Given a block storage volume type (e.g. Azure Disk) + // that does not support expansion of "node-attached" (i.e. + // controller-published) volumes, the Plugin may indicate + // OFFLINE volume expansion support and implement both + // ControllerExpandVolume and NodeExpandVolume. + //OFFLINE = 2; + capabilities = this.options.service.identity.capabilities + .volume_expansion || ["UNKNOWN"]; + + capabilities.forEach(item => { + response.capabilities.push({ + volume_expansion: { type: item } + }); + }); + + return response; + } + + async Probe(call) { + return { ready: { value: true } }; + } + + async ControllerGetCapabilities(call) { + let capabilities; + const response = { + capabilities: [] + }; + + //UNKNOWN = 0; + //CREATE_DELETE_VOLUME = 1; + //PUBLISH_UNPUBLISH_VOLUME = 2; + //LIST_VOLUMES = 3; + //GET_CAPACITY = 4; + // Currently the only way to consume a snapshot is to create + // a volume from it. Therefore plugins supporting + // CREATE_DELETE_SNAPSHOT MUST support creating volume from + // snapshot. + //CREATE_DELETE_SNAPSHOT = 5; + //LIST_SNAPSHOTS = 6; + + // Plugins supporting volume cloning at the storage level MAY + // report this capability. The source volume MUST be managed by + // the same plugin. Not all volume sources and parameters + // combinations MAY work. + //CLONE_VOLUME = 7; + + // Indicates the SP supports ControllerPublishVolume.readonly + // field. + //PUBLISH_READONLY = 8; + + // See VolumeExpansion for details. 
+ //EXPAND_VOLUME = 9; + capabilities = this.options.service.controller.capabilities.rpc || [ + "UNKNOWN" + ]; + + capabilities.forEach(item => { + response.capabilities.push({ + rpc: { type: item } + }); + }); + + return response; + } + + async NodeGetCapabilities(call) { + let capabilities; + const response = { + capabilities: [] + }; + + //UNKNOWN = 0; + //STAGE_UNSTAGE_VOLUME = 1; + // If Plugin implements GET_VOLUME_STATS capability + // then it MUST implement NodeGetVolumeStats RPC + // call for fetching volume statistics. + //GET_VOLUME_STATS = 2; + // See VolumeExpansion for details. + //EXPAND_VOLUME = 3; + capabilities = this.options.service.node.capabilities.rpc || ["UNKNOWN"]; + + capabilities.forEach(item => { + response.capabilities.push({ + rpc: { type: item } + }); + }); + + return response; + } + + async NodeGetInfo(call) { + return { + node_id: process.env.CSI_NODE_ID || os.hostname(), + max_volumes_per_node: 0 + }; + } + + /** + * https://kubernetes-csi.github.io/docs/raw-block.html + * --feature-gates=BlockVolume=true,CSIBlockVolume=true + * + * StagingTargetPath is always a directory even for block volumes + * + * NOTE: stage gets called every time publish does + * + * @param {*} call + */ + async NodeStageVolume(call) { + const mount = new Mount(); + const filesystem = new Filesystem(); + const iscsi = new ISCSI(); + let result; + let device; + + const volume_id = call.request.volume_id; + const staging_target_path = call.request.staging_target_path; + const capability = call.request.volume_capability; + const access_type = capability.access_type || "mount"; + const volume_context = call.request.volume_context; + let fs_type; + let mount_flags; + const node_attach_driver = volume_context.node_attach_driver; + const block_path = staging_target_path + "/block_device"; + const bind_mount_flags = []; + bind_mount_flags.push("defaults"); + + if (access_type == "mount") { + fs_type = capability.mount.fs_type; + mount_flags = 
capability.mount.mount_flags || []; + mount_flags.push("defaults"); + } + + result = await this.assertCapabilities([capability]); + if (!result.valid) { + throw new GrpcError( + grpc.status.INVALID_ARGUMENT, + `invalid capability: ${result.message}` + ); + } + + // csi spec stipulates that staging_target_path is a directory even for block mounts + result = await filesystem.pathExists(staging_target_path); + if (!result) { + await filesystem.mkdir(staging_target_path, ["-p", "-m", "0750"]); + } + + switch (node_attach_driver) { + case "nfs": + device = `${volume_context.server}:${volume_context.share}`; + break; + case "iscsi": + // create DB entry + let nodeDB = {}; + const nodeDBKeyPrefix = "node-db."; + for (const key in call.request.secrets) { + if (key.startsWith(nodeDBKeyPrefix)) { + nodeDB[key.substr(nodeDBKeyPrefix.length)] = + call.request.secrets[key]; + } + } + await iscsi.iscsiadm.createNodeDBEntry( + volume_context.iqn, + volume_context.portal, + nodeDB + ); + // login + await iscsi.iscsiadm.login(volume_context.iqn, volume_context.portal); + + // find device name + device = `/dev/disk/by-path/ip-${volume_context.portal}-iscsi-${volume_context.iqn}-lun-${volume_context.lun}`; + + // can take some time for device to show up, loop for some period + result = await filesystem.pathExists(device); + let timer_start = Math.round(new Date().getTime() / 1000); + let timer_max = 30; + while (!result) { + await sleep(2000); + result = await filesystem.pathExists(device); + let current_time = Math.round(new Date().getTime() / 1000); + if (!result && current_time - timer_start > timer_max) { + throw new GrpcError( + grpc.status.UNKNOWN, + `hit timeout waiting for device node to appear: ${device}` + ); + } + } + + device = await filesystem.realpath(device); + break; + default: + throw new GrpcError( + grpc.status.INVALID_ARGUMENT, + `unknown/unsupported node_attach_driver: ${node_attach_driver}` + ); + } + + switch (access_type) { + case "mount": + if (await 
filesystem.isBlockDevice(device)) { + // format + result = await filesystem.deviceIsFormatted(device); + if (!result) { + await filesystem.formatDevice(device, fs_type); + } + + let fs_info = await filesystem.getDeviceFilesystemInfo(device); + fs_type = fs_info.type; + + // fsck + result = await mount.deviceIsMountedAtPath( + device, + staging_target_path + ); + if (!result) { + await filesystem.checkFilesystem(device, fs_type); + } + } + + result = await mount.deviceIsMountedAtPath(device, staging_target_path); + if (!result) { + await mount.mount( + device, + staging_target_path, + ["-t", fs_type].concat(["-o", mount_flags.join(",")]) + ); + } + + if (await filesystem.isBlockDevice(device)) { + // go ahead and expand fs (this covers cloned setups where expand is not explicitly invoked) + switch (fs_type) { + case "ext4": + case "ext3": + case "ext4dev": + //await filesystem.checkFilesystem(device, fs_info.type); + await filesystem.expandFilesystem(device, fs_type); + break; + case "xfs": + //await filesystem.checkFilesystem(device, fs_info.type); + await filesystem.expandFilesystem(staging_target_path, fs_type); + break; + default: + // unsupported filesystem + throw new GrpcError( + grpc.status.FAILED_PRECONDITION, + `unsupported/unknown filesystem ${fs_type}` + ); + } + } + + break; + case "block": + //result = await mount.deviceIsMountedAtPath(device, block_path); + result = await mount.deviceIsMountedAtPath("dev", block_path); + if (!result) { + result = await filesystem.pathExists(staging_target_path); + if (!result) { + await filesystem.mkdir(staging_target_path, ["-p", "-m", "0750"]); + } + + result = await filesystem.pathExists(block_path); + if (!result) { + await filesystem.touch(block_path); + } + + await mount.bindMount(device, block_path, [ + "-o", + bind_mount_flags.join(",") + ]); + } + break; + default: + throw new GrpcError( + grpc.status.INVALID_ARGUMENT, + `unknown/unsupported access_type: ${access_type}` + ); + } + + return {}; + } + + /** + * 
NOTE: only gets called when the last pod on the node using the volume is removed
+ *
+ * 1. unmount fs
+ * 2. logout of iscsi if necessary
+ *
+ * @param {*} call
+ */
+ async NodeUnstageVolume(call) {
+ const mount = new Mount();
+ const filesystem = new Filesystem();
+ const iscsi = new ISCSI();
+ let result;
+ let is_block = false;
+ let block_device_info;
+ let access_type = "mount";
+
+ const volume_id = call.request.volume_id;
+ const staging_target_path = call.request.staging_target_path;
+ const block_path = staging_target_path + "/block_device";
+ let normalized_staging_path = staging_target_path;
+
+ if (!staging_target_path) {
+ throw new GrpcError(
+ grpc.status.INVALID_ARGUMENT,
+ `missing staging_target_path`
+ );
+ }
+
+ //result = await mount.pathIsMounted(block_path);
+ //result = await mount.pathIsMounted(staging_target_path)
+
+ result = await mount.pathIsMounted(block_path);
+ if (result) {
+ is_block = true;
+ access_type = "block";
+ block_device_info = await filesystem.getBlockDevice(block_path);
+ normalized_staging_path = block_path;
+ } else {
+ result = await mount.pathIsMounted(staging_target_path);
+ if (result) {
+ let device = await mount.getMountPointDevice(staging_target_path);
+ result = await filesystem.isBlockDevice(device);
+ if (result) {
+ is_block = true;
+ block_device_info = await filesystem.getBlockDevice(device);
+ }
+ }
+ }
+
+ if (is_block) {
+ if (block_device_info.tran == "iscsi") {
+ // figure out which iscsi session this belongs to and logout
+ // scan /dev/disk/by-path/ip-*? 
+ // device = `/dev/disk/by-path/ip-${volume_context.portal}-iscsi-${volume_context.iqn}-lun-${volume_context.lun}`; + // parse output from `iscsiadm -m session -P 3` + let sessions = await iscsi.iscsiadm.getSessionsDetails(); + for (let i = 0; i < sessions.length; i++) { + let session = sessions[i]; + let is_attached_to_session = false; + + if ( + session.attached_scsi_devices && + session.attached_scsi_devices.host && + session.attached_scsi_devices.host.devices + ) { + is_attached_to_session = session.attached_scsi_devices.host.devices.some( + device => { + if (device.attached_scsi_disk == block_device_info.name) { + return true; + } + return false; + } + ); + } + + if (is_attached_to_session) { + await iscsi.iscsiadm.logout(session.target, [ + session.persistent_portal + ]); + + let timer_start = Math.round(new Date().getTime() / 1000); + let timer_max = 30; + let deletedEntry = false; + while (!deletedEntry) { + try { + await iscsi.iscsiadm.deleteNodeDBEntry( + session.target, + session.persistent_portal + ); + deletedEntry = true; + } catch (err) { + await sleep(2000); + let current_time = Math.round(new Date().getTime() / 1000); + if (current_time - timer_start > timer_max) { + // not throwing error for now as future invocations would not enter code path anyhow + deletedEntry = true; + //throw new GrpcError( + // grpc.status.UNKNOWN, + // `hit timeout trying to delete iscsi node DB entry: ${session.target}, ${session.persistent_portal}` + //); + } + } + } + } + } + } + } + + result = await mount.pathIsMounted(normalized_staging_path); + if (result) { + result = await mount.umount(normalized_staging_path, ["--force"]); + } + + if (access_type == "block") { + // remove touched file + result = await filesystem.pathExists(block_path); + if (result) { + result = await filesystem.rm(block_path); + } + } + + result = await filesystem.pathExists(staging_target_path); + if (result) { + result = await filesystem.rmdir(staging_target_path); + } + + return {}; + } + + 
/**
 * Publish (bind mount) a previously staged volume into the pod's
 * target_path. Supports both "mount" and "block" access types; in both
 * cases staging_target_path must already be populated by NodeStageVolume.
 *
 * @param {*} call
 */
async NodePublishVolume(call) {
  const mount = new Mount();
  const filesystem = new Filesystem();
  let result;

  const volume_id = call.request.volume_id;
  const staging_target_path = call.request.staging_target_path || "";
  const target_path = call.request.target_path;
  const capability = call.request.volume_capability;
  const access_type = capability.access_type || "mount";
  const readonly = call.request.readonly;
  const volume_context = call.request.volume_context;
  const bind_mount_flags = [];
  const node_attach_driver = volume_context.node_attach_driver;

  if (access_type == "mount") {
    let mount_flags = capability.mount.mount_flags || [];
    bind_mount_flags.push(...mount_flags);
  }

  bind_mount_flags.push("defaults");
  if (readonly) bind_mount_flags.push("ro");

  switch (node_attach_driver) {
    case "nfs":
    case "iscsi":
      // ensure appropriate directories/files
      switch (access_type) {
        case "mount":
          // ensure directory exists
          result = await filesystem.pathExists(target_path);
          if (!result) {
            await filesystem.mkdir(target_path, ["-p", "-m", "0750"]);
          }

          break;
        case "block":
          // ensure target_path directory exists as target path should be a file
          let target_dir = await filesystem.dirname(target_path);
          result = await filesystem.pathExists(target_dir);
          if (!result) {
            await filesystem.mkdir(target_dir, ["-p", "-m", "0750"]);
          }

          // ensure target file exists
          result = await filesystem.pathExists(target_path);
          if (!result) {
            await filesystem.touch(target_path);
          }
          break;
        default:
          throw new GrpcError(
            grpc.status.INVALID_ARGUMENT,
            `unsupported/unknown access_type ${access_type}`
          );
      }

      // ensure bind mount
      if (staging_target_path) {
        let normalized_staging_device;
        let normalized_staging_path;

        if (access_type == "block") {
          normalized_staging_path = staging_target_path + "/block_device";
        } else {
          normalized_staging_path = staging_target_path;
        }

        result = await mount.pathIsMounted(target_path);
        // if not mounted, mount
        if (!result) {
          await mount.bindMount(normalized_staging_path, target_path, [
            "-o",
            bind_mount_flags.join(",")
          ]);
        } else {
          // if is mounted, ensure proper source
          if (access_type == "block") {
            normalized_staging_device = "dev"; // special syntax for single file bind mounts
          } else {
            normalized_staging_device = await mount.getMountPointDevice(
              staging_target_path
            );
          }
          result = await mount.deviceIsMountedAtPath(
            normalized_staging_device,
            target_path
          );
          if (!result) {
            throw new GrpcError(
              grpc.status.FAILED_PRECONDITION,
              `it appears something else is already mounted at ${target_path}`
            );
          }
        }

        return {};
      }

      // unsupported filesystem
      throw new GrpcError(
        grpc.status.FAILED_PRECONDITION,
        `only staged configurations are valid`
      );
    default:
      throw new GrpcError(
        grpc.status.INVALID_ARGUMENT,
        `unknown/unsupported node_attach_driver: ${node_attach_driver}`
      );
  }

  return {};
}

/**
 * Unmount and remove the pod's target_path (inverse of NodePublishVolume).
 *
 * @param {*} call
 */
async NodeUnpublishVolume(call) {
  const mount = new Mount();
  const filesystem = new Filesystem();
  let result;

  const volume_id = call.request.volume_id;
  const target_path = call.request.target_path;

  result = await mount.pathIsMounted(target_path);
  if (result) {
    result = await mount.umount(target_path, ["--force"]);
  }

  result = await filesystem.pathExists(target_path);
  if (result) {
    // "mount" access_type publishes a directory, "block" publishes a file
    if (fs.lstatSync(target_path).isDirectory()) {
      result = await filesystem.rmdir(target_path);
    } else {
      result = await filesystem.rm([target_path]);
    }
  }

  return {};
}

/**
 * Report capacity/usage stats for a staged/published volume path,
 * handling both filesystem mounts and bind-mounted block devices.
 *
 * @param {*} call
 */
async NodeGetVolumeStats(call) {
  const mount = new Mount();
  const filesystem = new Filesystem();
  let result;
  let device_path;
  let access_type;
  const volume_id = call.request.volume_id;
  const volume_path = call.request.volume_path;
  const block_path = volume_path + "/block_device";

  if (!volume_path) {
    throw new GrpcError(grpc.status.INVALID_ARGUMENT,
`missing volume_path`); + } + + if ( + (await mount.isBindMountedBlockDevice(volume_path)) || + (await mount.isBindMountedBlockDevice(block_path)) + ) { + device_path = block_path; + access_type = "block"; + } else { + device_path = volume_path; + access_type = "mount"; + } + + switch (access_type) { + case "mount": + result = await mount.getMountDetails(device_path); + + return { + usage: [ + { + available: result.avail, + total: result.size, + used: result.used, + unit: "BYTES" + } + ] + }; + case "block": + result = await filesystem.getBlockDevice(device_path); + + return { + usage: [ + { + total: result.size, + unit: "BYTES" + } + ] + }; + default: + throw new GrpcError( + grpc.status.INVALID_ARGUMENT, + `unsupported/unknown access_type ${access_type}` + ); + } + } + + /** + * https://kubernetes-csi.github.io/docs/volume-expansion.html + * allowVolumeExpansion: true + * --feature-gates=ExpandCSIVolumes=true + * --feature-gates=ExpandInUsePersistentVolumes=true + * + * @param {*} call + */ + async NodeExpandVolume(call) { + const mount = new Mount(); + const filesystem = new Filesystem(); + let device; + let fs_info; + let device_path; + let access_type; + let is_block = false; + let is_formatted; + let fs_type; + + const volume_id = call.request.volume_id; + const volume_path = call.request.volume_path; + const block_path = volume_path + "/block_device"; + const capacity_range = call.request.capacity_range; + const volume_capability = call.request.volume_capability; + + if (!volume_path) { + throw new GrpcError(grpc.status.INVALID_ARGUMENT, `missing volume_path`); + } + + if ( + (await mount.isBindMountedBlockDevice(volume_path)) || + (await mount.isBindMountedBlockDevice(block_path)) + ) { + access_type = "block"; + device_path = block_path; + } else { + access_type = "mount"; + device_path = volume_path; + } + + try { + device = await mount.getMountPointDevice(device_path); + is_formatted = await filesystem.deviceIsFormatted(device); + is_block = await 
filesystem.isBlockDevice(device); + } catch (err) { + if (err.code == 1) { + throw new GrpcError( + grpc.status.FAILED_PRECONDITION, + `volume_path ${volume_path} is not currently mounted` + ); + } + } + + if (is_block) { + await filesystem.rescanDevice(device); + if (is_formatted && access_type == "mount") { + fs_info = await filesystem.getDeviceFilesystemInfo(device); + fs_type = fs_info.type; + if (fs_type) { + switch (fs_type) { + case "ext4": + case "ext3": + case "ext4dev": + //await filesystem.checkFilesystem(device, fs_info.type); + await filesystem.expandFilesystem(device, fs_type); + break; + case "xfs": + let mount_info = await mount.getMountDetails(device_path); + if (mount_info.fstype == "xfs") { + //await filesystem.checkFilesystem(device, fs_info.type); + await filesystem.expandFilesystem(device_path, fs_type); + } + break; + default: + // unsupported filesystem + throw new GrpcError( + grpc.status.FAILED_PRECONDITION, + `unsupported/unknown filesystem ${fs_type}` + ); + } + } + } else { + //block device unformatted + return {}; + } + } else { + // not block device + return {}; + } + + return {}; + } +} +module.exports.CsiBaseDriver = CsiBaseDriver; diff --git a/src/utils/filesystem.js b/src/utils/filesystem.js new file mode 100644 index 0000000..dbe70a0 --- /dev/null +++ b/src/utils/filesystem.js @@ -0,0 +1,471 @@ +const cp = require("child_process"); +const fs = require("fs"); + +/** + * https://github.com/kubernetes/kubernetes/tree/master/pkg/util/mount + * https://github.com/kubernetes/kubernetes/blob/master/pkg/util/mount/mount_linux.go + */ +class Filesystem { + constructor(options = {}) { + const filesystem = this; + filesystem.options = options; + + options.paths = options.paths || {}; + + if (!options.paths.sudo) { + options.paths.sudo = "/usr/bin/sudo"; + } + + if (!options.timeout) { + options.timeout = 10 * 60 * 1000; + } + + if (!options.executor) { + options.executor = { + spawn: cp.spawn + }; + } + } + + /** + * Attempt to discover if 
device is a block device
 *
 * @param {*} device
 */
async isBlockDevice(device) {
  const filesystem = this;

  // only absolute paths can be resolved against lsblk output
  if (!device.startsWith("/")) {
    return false;
  }
  const device_path = await filesystem.realpath(device);
  const blockdevices = await filesystem.getAllBlockDevices();

  return blockdevices.some(i => {
    if (i.path == device_path) {
      return true;
    }
    return false;
  });
}

/**
 * create symlink (ln -s [options] target link)
 *
 * @param {*} target
 * @param {*} link
 * @param {*} options
 */
async symlink(target, link, options = []) {
  const filesystem = this;
  let args = ["-s"];
  args = args.concat(options);
  args = args.concat([target, link]);

  try {
    await filesystem.exec("ln", args);
  } catch (err) {
    throw err;
  }
}

/**
 * remove a path (rm); options holds the path(s) plus any flags
 *
 * @param {*} options
 */
async rm(options = []) {
  const filesystem = this;
  let args = [];
  args = args.concat(options);

  try {
    await filesystem.exec("rm", args);
  } catch (err) {
    throw err;
  }
}

/**
 * touch a path
 *
 * @param {*} path
 * @param {*} options
 */
async touch(path, options = []) {
  const filesystem = this;
  let args = [];
  args = args.concat(options);
  args.push(path);

  try {
    await filesystem.exec("touch", args);
  } catch (err) {
    throw err;
  }
}

/**
 * return the parent directory of a path (dirname)
 *
 * @param {*} path
 */
async dirname(path) {
  const filesystem = this;
  let args = [];
  args.push(path);
  let result;

  try {
    result = await filesystem.exec("dirname", args);
    return result.stdout.trim();
  } catch (err) {
    throw err;
  }
}

/**
 * list all block devices: lsblk -a -b -l -J -O
 */
async getAllBlockDevices() {
  const filesystem = this;
  let args = ["-a", "-b", "-l", "-J", "-O"];
  let result;

  try {
    result = await filesystem.exec("lsblk", args);
    const parsed = JSON.parse(result.stdout);
    return parsed.blockdevices;
  } catch (err) {
    throw err;
  }
}

/**
 * details for a single block device: lsblk -a -b -l -J -O <device>
 */
async getBlockDevice(device) {
  const filesystem = this;
  device = await filesystem.realpath(device);
  let args = ["-a",
"-b", "-l", "-J", "-O"]; + args.push(device); + let result; + + try { + result = await filesystem.exec("lsblk", args); + const parsed = JSON.parse(result.stdout); + return parsed.blockdevices[0]; + } catch (err) { + throw err; + } + } + + /** + * blkid -p -o export + * + * @param {*} device + */ + async deviceIsFormatted(device) { + const filesystem = this; + let args = ["-p", "-o", "export", device]; + let result; + + try { + result = await filesystem.exec("blkid", args); + } catch (err) { + if (err.code == 2) { + return false; + } + throw err; + } + + return true; + } + + /** + * blkid -p -o export + * + * @param {*} device + */ + async getDeviceFilesystemInfo(device) { + const filesystem = this; + let args = ["-p", "-o", "export", device]; + let result; + + try { + result = await filesystem.exec("blkid", args); + const entries = result.stdout.trim().split("\n"); + const properties = {}; + let fields, key, value; + entries.forEach(entry => { + fields = entry.split("="); + key = fields[0].toLowerCase(); + value = fields[1]; + properties[key] = value; + }); + + return properties; + } catch (err) { + throw err; + } + } + + /** + * mkfs. [] device + * + * @param {*} device + * @param {*} fstype + * @param {*} options + */ + async formatDevice(device, fstype, options = []) { + const filesystem = this; + let args = []; + args = args.concat(options); + switch (fstype) { + case "vfat": + args = args.concat(["-I"]); + break; + } + args.push(device); + let result; + + try { + result = await filesystem.exec("mkfs." 
+ fstype, args); + return result; + } catch (err) { + throw err; + } + } + + async realpath(path) { + const filesystem = this; + let args = [path]; + let result; + + try { + result = await filesystem.exec("realpath", args); + return result.stdout.trim(); + } catch (err) { + throw err; + } + } + + async rescanDevice(device) { + const filesystem = this; + let result; + let device_name; + + result = await filesystem.isBlockDevice(device); + if (!result) { + throw new Error( + `cannot rescan device ${device} because it is not a block device` + ); + } + + result = await filesystem.realpath(device); + device_name = result.split("/").pop(); + + // echo 1 > /sys/block/sdb/device/rescan + const sys_file = `/sys/block/${device_name}/device/rescan`; + fs.writeFileSync(sys_file, "1"); + } + + /** + * expand a give filesystem + * + * @param {*} device + * @param {*} fstype + * @param {*} options + */ + async expandFilesystem(device, fstype, options = []) { + const filesystem = this; + let command; + let args = []; + let result; + + switch (fstype.toLowerCase()) { + case "ext4": + case "ext3": + case "ext4dev": + command = "resize2fs"; + args = args.concat(options); + args.push(device); + break; + case "xfs": + command = "xfs_growfs"; + args = args.concat(options); + args.push(device); // in this case should be a mounted path + break; + case "vfat": + // must be unmounted + command = "fatresize"; + args = args.concat(options); + args = args.concat(["-s", "max"]); + args.push(device); + break; + } + + try { + result = await filesystem.exec(command, args); + return result; + } catch (err) { + throw err; + } + } + + /** + * expand a give filesystem + * + * fsck [options] -- [fs-options] [ ...] 
+ * + * @param {*} device + * @param {*} fstype + * @param {*} options + * @param {*} fsoptions + */ + async checkFilesystem(device, fstype, options = [], fsoptions = []) { + const filesystem = this; + let command; + let args = []; + let result; + + switch (fstype.toLowerCase()) { + case "ext4": + case "ext3": + case "ext4dev": + command = "fsck"; + args = args.concat(options); + args.push(device); + args.push("--"); + args = args.concat(fsoptions); + args.push("-f"); + args.push("-p"); + break; + case "xfs": + command = "xfs_repair"; + args = args.concat(["-o", "force_geometry"]); + args = args.concat(options); + args.push(device); + break; + default: + command = "fsck"; + args = args.concat(options); + args.push(device); + args.push("--"); + args = args.concat(fsoptions); + break; + } + + try { + result = await filesystem.exec(command, args); + return result; + } catch (err) { + throw err; + } + } + + /** + * mkdir [] + * + * @param {*} path + * @param {*} options + */ + async mkdir(path, options = []) { + const filesystem = this; + let args = []; + args = args.concat(options); + args.push(path); + + try { + await filesystem.exec("mkdir", args); + } catch (err) { + throw err; + } + return true; + } + + /** + * rmdir [] + * + * @param {*} path + * @param {*} options + */ + async rmdir(path, options = []) { + const filesystem = this; + let args = []; + args = args.concat(options); + args.push(path); + + try { + await filesystem.exec("rmdir", args); + } catch (err) { + throw err; + } + return true; + } + + /** + * + * @param {*} path + */ + async pathExists(path) { + const filesystem = this; + let args = []; + args.push(path); + + try { + await filesystem.exec("stat", args); + } catch (err) { + return false; + } + return true; + } + + exec(command, args, options) { + const filesystem = this; + args = args || []; + + let timeout; + let stdout = ""; + let stderr = ""; + + if (filesystem.options.sudo) { + args.unshift(command); + command = 
filesystem.options.paths.sudo; + } + console.log("executing fileystem command: %s %s", command, args.join(" ")); + const child = filesystem.options.executor.spawn(command, args, options); + + let didTimeout = false; + if (options && options.timeout) { + timeout = setTimeout(() => { + didTimeout = true; + child.kill(options.killSignal || "SIGTERM"); + }, options.timeout); + } + + return new Promise((resolve, reject) => { + child.stdout.on("data", function(data) { + stdout = stdout + data; + }); + + child.stderr.on("data", function(data) { + stderr = stderr + data; + }); + + child.on("close", function(code) { + const result = { code, stdout, stderr }; + if (timeout) { + clearTimeout(timeout); + } + if (code) { + console.log( + "failed to execute filesystem command: %s, response: %j", + [command].concat(args).join(" "), + result + ); + reject(result); + } else { + resolve(result); + } + }); + }); + } +} + +module.exports.Filesystem = Filesystem; diff --git a/src/utils/general.js b/src/utils/general.js new file mode 100644 index 0000000..872a894 --- /dev/null +++ b/src/utils/general.js @@ -0,0 +1,7 @@ +function sleep(ms){ + return new Promise(resolve=>{ + setTimeout(resolve,ms) + }) +} + +module.exports.sleep = sleep; \ No newline at end of file diff --git a/src/utils/grpc.js b/src/utils/grpc.js new file mode 100644 index 0000000..a84bd90 --- /dev/null +++ b/src/utils/grpc.js @@ -0,0 +1,9 @@ +class GrpcError { + constructor(code, message = "") { + this.name = "GrpcError"; + this.code = code; + this.message = message; + } +} + +module.exports.GrpcError = GrpcError; diff --git a/src/utils/iscsi.js b/src/utils/iscsi.js new file mode 100644 index 0000000..88f4eb7 --- /dev/null +++ b/src/utils/iscsi.js @@ -0,0 +1,483 @@ +const cp = require("child_process"); + +function getIscsiValue(value) { + if (value == "") return null; + return value; +} + +class ISCSI { + constructor(options = {}) { + const iscsi = this; + iscsi.options = options; + + options.paths = options.paths || 
{}; + if (!options.paths.iscsiadm) { + options.paths.iscsiadm = "iscsiadm"; + } + + if (!options.paths.sudo) { + options.paths.sudo = "/usr/bin/sudo"; + } + + if (!options.timeout) { + options.timeout = 10 * 60 * 1000; + } + + if (!options.executor) { + options.executor = { + spawn: cp.spawn + }; + } + + iscsi.iscsiadm = { + /** + * iscsiadm -m iface -o show + * iface_name transport_name,hwaddress,ipaddress,net_ifacename,initiatorname + */ + async listInterfaces() { + let args = []; + args = args.concat(["-m", "iface", "-o", "show"]); + const result = await iscsi.exec(options.paths.iscsiadm, args); + + // return empty list if no stdout data + if (!result.stdout) { + return []; + } + + const entries = result.stdout.trim().split("\n"); + const interfaces = []; + let fields; + entries.forEach(entry => { + fields = entry.split(" "); + interfaces.push({ + iface_name: fields[0], + transport_name: fields[1].split(",")[0], + hwaddress: getIscsiValue(fields[1].split(",")[1]), + ipaddress: getIscsiValue(fields[1].split(",")[2]), + net_ifacename: getIscsiValue(fields[1].split(",")[3]), + initiatorname: getIscsiValue(fields[1].split(",")[4]) + }); + }); + + return interfaces; + }, + + /** + * iscsiadm -m iface -o show -I + * + * @param {*} iface + */ + async showInterface(iface) { + let args = []; + args = args.concat(["-m", "iface", "-o", "show", "-I", iface]); + let result = await iscsi.exec(options.paths.iscsiadm, args); + + const entries = result.stdout.trim().split("\n"); + const i = {}; + let fields, key, value; + entries.forEach(entry => { + if (entry.startsWith("#")) return; + fields = entry.split("="); + key = fields[0].trim(); + value = fields[1].trim(); + i[key] = getIscsiValue(value); + }); + + return i; + }, + + /** + * iscsiadm --mode node -T -p -o new + * + * @param {*} tgtIQN + * @param {*} portal + * @param {*} attributes + */ + async createNodeDBEntry(tgtIQN, portal, attributes = {}) { + let args = []; + args = args.concat([ + "-m", + "node", + "-T", + 
tgtIQN, + "-p", + portal, + "-o", + "new" + ]); + await iscsi.exec(options.paths.iscsiadm, args); + for (let attribute in attributes) { + let args = []; + args = args.concat([ + "-m", + "node", + "-T", + tgtIQN, + "-p", + portal, + "-o", + "update", + "--name", + attribute, + "--value", + attributes[attribute] + ]); + await iscsi.exec(options.paths.iscsiadm, args); + } + }, + + /** + * iscsiadm --mode node -T -p -o delete + * + * @param {*} tgtIQN + * @param {*} portal + */ + async deleteNodeDBEntry(tgtIQN, portal) { + let args = []; + args = args.concat([ + "-m", + "node", + "-T", + tgtIQN, + "-p", + portal, + "-o", + "delete" + ]); + await iscsi.exec(options.paths.iscsiadm, args); + }, + + /** + * iscsiadm -m session + */ + async getSessions() { + let args = []; + args = args.concat(["-m", "session"]); + let result; + try { + result = await iscsi.exec(options.paths.iscsiadm, args); + } catch (err) { + // no active sessions + if (err.code == 21) { + result = err; + } else { + throw err; + } + } + + // return empty list if no stdout data + if (!result.stdout) { + return []; + } + + // protocol: [id] ip:port,target_portal_group_tag targetname + const entries = result.stdout.trim().split("\n"); + const sessions = []; + let fields; + entries.forEach(entry => { + fields = entry.split(" "); + sessions.push({ + protocol: entry.split(":")[0], + id: fields[1].replace("[", "").replace("]", ""), + portal: fields[2].split(",")[0], + target_portal_group_tag: fields[2].split(",")[1], + iqn: fields[3].split(":")[0], + target: fields[3].split(":")[1] + }); + }); + + return sessions; + }, + + /** + * iscsiadm -m session + */ + async getSessionsDetails() { + let args = []; + args = args.concat(["-m", "session", "-P", "3"]); + let result; + try { + result = await iscsi.exec(options.paths.iscsiadm, args); + } catch (err) { + // no active sessions + if (err.code == 21) { + result = err; + } else { + throw err; + } + } + + // return empty list if no stdout data + if (!result.stdout) { 
+ return []; + } + + let sessionGroups = []; + let currentSession = []; + + // protocol: [id] ip:port,target_portal_group_tag targetname + const entries = result.stdout.trim().split("\n"); + // remove first 2 lines + entries.shift(); + entries.shift(); + + for (let i = 0; i < entries.length; i++) { + let entry = entries[i]; + if (entry.startsWith("Target:")) { + if (currentSession.length > 0) { + sessionGroups.push(currentSession); + } + currentSession = [entry]; + } else { + currentSession.push(entry); + } + if (i + 1 == entries.length) { + sessionGroups.push(currentSession); + } + } + + const sessions = []; + for (let i = 0; i < sessionGroups.length; i++) { + let sessionLines = sessionGroups[i]; + let session = {}; + let currentSection; + for (let j = 0; j < sessionLines.length; j++) { + let line = sessionLines[j].trim(); + + let uniqueChars = String.prototype.concat(...new Set(line)); + if (uniqueChars == "*") { + currentSection = sessionLines[j + 1] + .trim() + .toLowerCase() + .replace(/ /g, "_") + .replace(/\W/g, ""); + j++; + j++; + continue; + } + + let key = line + .split(":", 1)[0] + .trim() + .replace(/ /g, "_") + .replace(/\W/g, ""); + let value = line + .split(":") + .slice(1) + .join(":") + .trim(); + + if (currentSection) { + session[currentSection] = session[currentSection] || {}; + switch (currentSection) { + case "attached_scsi_devices": + key = key.toLowerCase(); + if (key == "host_number") { + session[currentSection]["host"] = { + number: value.split("\t")[0], + state: value + .split("\t") + .slice(1) + .join("\t") + .split(":") + .slice(1) + .join(":") + .trim() + }; + while ( + sessionLines[j + 1] && + sessionLines[j + 1].trim().startsWith("scsi") + ) { + session[currentSection]["host"]["devices"] = + session[currentSection]["host"]["devices"] || []; + let line1p = sessionLines[j + 1].split(" "); + let line2 = sessionLines[j + 2]; + let line2p = ""; + if (line2) { + line2p = line2.split(" "); + session[currentSection]["host"]["devices"].push({ 
+ channel: line1p[2], + id: line1p[4], + lun: line1p[6], + attached_scsi_disk: line2p[3].split("\t")[0], + state: line2 + .trim() + .split("\t") + .slice(1) + .join("\t") + .split(":") + .slice(1) + .join(":") + .trim() + }); + } + + j++; + j++; + } + continue; + } + break; + case "negotiated_iscsi_params": + key = key.charAt(0).toLowerCase() + key.slice(1); + key = key.replace( + /[A-Z]/g, + letter => `_${letter.toLowerCase()}` + ); + break; + } + key = key.toLowerCase(); + session[currentSection][key] = value; + } else { + key = key.toLowerCase(); + if (key == "target") { + value = value.split(" ")[0]; + } + session[key.trim()] = value.trim(); + } + } + sessions.push(session); + } + + return sessions; + }, + + /** + * iscsiadm -m discovery -t st -p + * + * @param {*} portal + */ + async discoverTargets(portal) { + let args = []; + args = args.concat(["-m", "discovery"]); + args = args.concat(["-t", "sendtargets"]); + args = args.concat(["-p", portal]); + + let result; + try { + result = await iscsi.exec(options.paths.iscsiadm, args); + } catch (err) { + throw err; + } + + // return empty list if no stdout data + if (!result.stdout) { + return []; + } + + const entries = result.stdout.trim().split("\n"); + const targets = []; + entries.forEach(entry => { + targets.push({ + portal: entry.split(",")[0], + target_portal_group_tag: entry.split(" ")[0].split(",")[1], + iqn: entry.split(" ")[1].split(":")[0], + target: entry.split(" ")[1].split(":")[1] + }); + }); + + return targets; + }, + + /** + * iscsiadm -m node -T -p -l + * + * @param {*} tgtIQN + * @param {*} portal + */ + async login(tgtIQN, portal) { + let args = []; + args = args.concat(["-m", "node", "-T", tgtIQN, "-p", portal, "-l"]); + + try { + await iscsi.exec(options.paths.iscsiadm, args); + } catch (err) { + // already logged in + if (err.code == 15) { + return true; + } + throw err; + } + + return true; + }, + + /** + * + * + * @param {*} tgtIQN + * @param {*} portals + */ + async logout(tgtIQN, 
portals) { + let args = []; + args = args.concat(["-m", "node", "-T", tgtIQN]); + + if (!Array.isArray(portals)) { + portals = [portals]; + } + portals.forEach(p => { + iscsi + .exec(options.paths.iscsiadm, args.concat(["-p", p, "-u"])) + .then(() => {}) + .catch(err => { + if (err.code == 21) { + // no matching sessions + } + }); + }); + + return true; + }, + + async deleteDBEntry(tgtIQN) {} + }; + } + + exec(command, args, options) { + const iscsi = this; + args = args || []; + + let timeout; + let stdout = ""; + let stderr = ""; + + if (iscsi.options.sudo) { + args.unshift(command); + command = iscsi.options.paths.sudo; + } + console.log("executing iscsi command: %s %s", command, args.join(" ")); + const child = iscsi.options.executor.spawn(command, args, options); + + let didTimeout = false; + if (options && options.timeout) { + timeout = setTimeout(() => { + didTimeout = true; + child.kill(options.killSignal || "SIGTERM"); + }, options.timeout); + } + + return new Promise((resolve, reject) => { + child.stdout.on("data", function(data) { + stdout = stdout + data; + }); + + child.stderr.on("data", function(data) { + stderr = stderr + data; + }); + + child.on("close", function(code) { + const result = { code, stdout, stderr }; + if (timeout) { + clearTimeout(timeout); + } + if (code) { + reject(result); + } else { + resolve(result); + } + }); + }); + } +} + +module.exports.ISCSI = ISCSI; diff --git a/src/utils/logger.js b/src/utils/logger.js new file mode 100644 index 0000000..8b01130 --- /dev/null +++ b/src/utils/logger.js @@ -0,0 +1,116 @@ +/** + * Levels + * + * error: 0 + * warn: 1 + * info: 2 + * verbose: 3 + * debug: 4 + * silly: 5 + */ + +const winston = require("winston"); +const bunyan = require("bunyan"); + +const env = process.env.NODE_ENV || "development"; +let level = process.env.DEMOCRATIC_CSI_LOG_LEVEL || null; + +if (!level) { + if (env == "production") { + level = "info"; + } else { + level = "verbose"; + } +} + +let formatters; +let defaultMeta; 
+if (env == "production") { + formatters = [winston.format.json()]; + defaultMeta = { service: "democratic-csi" }; +} else { + formatters = [winston.format.colorize(), winston.format.simple()]; + defaultMeta = {}; +} + +const logger = winston.createLogger({ + level: level, + format: winston.format.combine( + winston.format.errors({ stack: true }), + winston.format.splat(), + ...formatters + ), + defaultMeta: defaultMeta, + transports: [ + new winston.transports.Console({ + handleExceptions: true + }) + ] +}); + +/** + * A Bunyan raw stream object (i.e. has a `.write(rec)` method that takes a + * Bunyan log record) that shims logging to a given Winston logger. + * + * @param {winston.Logger} wlog is a Winston Logger to which to shim. + */ +function Bunyan2Winston(wlog) { + this.wlog = wlog; +} +Bunyan2Winston.prototype.write = function write(rec) { + // Map to the appropriate Winston log level (by default 'info', 'warn' + // or 'error') and call signature: `wlog.log(level, msg, metadata)`. + var wlevel; + if (rec.level <= bunyan.INFO) { + wlevel = "info"; + } else if (rec.level <= bunyan.WARN) { + wlevel = "warn"; + } else { + wlevel = "error"; + } + + // Note: We are *modifying* the log record here. This could be a problem + // if our Bunyan logger had other streams. This one doesn't. + var msg = rec.msg; + delete rec.msg; + + // Remove internal bunyan fields that won't mean anything outside of + // a bunyan context. + delete rec.v; + delete rec.level; + // TODO: more? + + // Note: Winston doesn't handle *objects* in the 'metadata' field well + // (e.g. the Bunyan record 'time' field is a Date instance, 'req' and + // 'res' are typically objects). With 'json: true' on a Winston transport + // it is a bit better, but still messes up 'date'. What exactly to do + // here is perhaps user-preference. 
// ---------------------------------------------------------------------------
// src/utils/mount.js
// ---------------------------------------------------------------------------
const cp = require("child_process");
const { Filesystem } = require("../utils/filesystem");

// findmnt flags shared by every invocation:
// -b = sizes in bytes, -J = JSON output
const FINDMNT_COMMON_OPTIONS = [
  "--output",
  "source,target,fstype,label,options,avail,size,used",
  "-b",
  "-J"
];

/**
 * Thin promise-based wrapper around the mount/umount/findmnt binaries.
 *
 * options:
 *   paths    - overrides for binary paths (mount, umount, findmnt, sudo)
 *   sudo     - when truthy, prefix every command with the sudo binary
 *   timeout  - default per-command timeout in ms (defaults to 10 minutes)
 *   executor - object exposing spawn(), injectable for testing
 */
class Mount {
  constructor(options = {}) {
    const mount = this;
    mount.options = options;

    options.paths = options.paths || {};
    if (!options.paths.mount) {
      options.paths.mount = "mount";
    }

    if (!options.paths.umount) {
      options.paths.umount = "umount";
    }

    if (!options.paths.findmnt) {
      options.paths.findmnt = "findmnt";
    }

    if (!options.paths.sudo) {
      options.paths.sudo = "/usr/bin/sudo";
    }

    if (!options.timeout) {
      options.timeout = 10 * 60 * 1000;
    }

    if (!options.executor) {
      options.executor = {
        spawn: cp.spawn
      };
    }
  }

  /**
   * Whether the given device is mounted anywhere.
   *
   * findmnt --source <device> --output ... -b -J
   * findmnt exits 1 when there is no matching mount.
   *
   * @param {string} device block device path (symlinks are resolved first)
   * @returns {Promise<boolean>}
   */
  async deviceIsMounted(device) {
    const filesystem = new Filesystem();
    if (device.startsWith("/")) {
      device = await filesystem.realpath(device);
    }

    const mount = this;
    const args = ["--source", device, ...FINDMNT_COMMON_OPTIONS];

    try {
      await mount.exec(mount.options.paths.findmnt, args);
    } catch (err) {
      // exit code 1: no results
      if (err.code == 1) {
        return false;
      }
      throw err;
    }

    return true;
  }

  /**
   * Whether the given path is a mountpoint.
   *
   * findmnt --mountpoint <path> --output ... -b -J
   *
   * @param {string} path
   * @returns {Promise<boolean>}
   */
  async pathIsMounted(path) {
    const mount = this;
    const args = ["--mountpoint", path, ...FINDMNT_COMMON_OPTIONS];

    try {
      await mount.exec(mount.options.paths.findmnt, args);
    } catch (err) {
      // exit code 1: no results
      if (err.code == 1) {
        return false;
      }
      // exit code 32 with this message: the path does not exist at all.
      // bug fix: JS strings have no .contains() method — the original
      // threw a TypeError here instead of returning false
      if (
        err.code == 32 &&
        err.stderr &&
        err.stderr.includes("No such file or directory")
      ) {
        return false;
      }
      throw err;
    }

    return true;
  }

  /**
   * Whether the given device is mounted at the given path.
   *
   * findmnt --source <device> --mountpoint <path> --output ... -b -J
   *
   * @param {string} device block device path (symlinks are resolved first)
   * @param {string} path
   * @returns {Promise<boolean>}
   */
  async deviceIsMountedAtPath(device, path) {
    const filesystem = new Filesystem();
    if (device.startsWith("/")) {
      device = await filesystem.realpath(device);
    }

    const mount = this;
    const args = [
      "--source",
      device,
      "--mountpoint",
      path,
      ...FINDMNT_COMMON_OPTIONS
    ];

    try {
      await mount.exec(mount.options.paths.findmnt, args);
    } catch (err) {
      // exit code 1: no results
      if (err.code == 1) {
        return false;
      }
      throw err;
    }

    return true;
  }

  /**
   * Parsed findmnt entry for the filesystem mounted at the given path.
   *
   * @param {string} path
   * @returns {Promise<object>} first entry of findmnt's JSON "filesystems"
   * @throws the exec rejection when findmnt fails (e.g. path not mounted)
   */
  async getMountDetails(path) {
    const mount = this;
    const args = ["--mountpoint", path, ...FINDMNT_COMMON_OPTIONS];
    const result = await mount.exec(mount.options.paths.findmnt, args);
    const parsed = JSON.parse(result.stdout);
    return parsed.filesystems[0];
  }

  /**
   * Device (source) mounted at the given path.
   *
   * For bind-mounted block devices findmnt reports a source of the form
   * "dev[/sdb]" with fstype devtmpfs; normalize that to "/dev/sdb".
   *
   * @param {string} path
   * @returns {Promise<string>}
   */
  async getMountPointDevice(path) {
    const mount = this;
    const result = await mount.getMountDetails(path);
    if (result.fstype == "devtmpfs") {
      // dev[/sdb] -> /dev/sdb
      let source = "/" + result.source;
      source = source.replace("[", "");
      source = source.replace("]", "");
      return source.trim();
    }
    return result.source.trim();
  }

  /**
   * Very specifically looking for *devices* (vs filesystems/directories)
   * which were bind mounted.
   *
   * @param {string} path
   * @returns {Promise<boolean>}
   */
  async isBindMountedBlockDevice(path) {
    const filesystem = new Filesystem();
    const mount = this;

    const is_mounted = await mount.pathIsMounted(path);
    if (!is_mounted) {
      return false;
    }

    const mount_info = await mount.getMountDetails(path);
    // bug fix: isBlockDevice() returns a promise elsewhere in this codebase;
    // without await the truthy Promise made this check always pass
    // NOTE(review): confirm against ../utils/filesystem
    const is_block = await filesystem.isBlockDevice(path);
    if (mount_info.fstype == "devtmpfs" && is_block) {
      return true;
    }
    return false;
  }

  /**
   * Filesystem type at the given mount point.
   *
   * @param {string} path
   * @returns {Promise<string>}
   */
  async getMountPointFsType(path) {
    const mount = this;
    const result = await mount.getMountDetails(path);
    return result.fstype;
  }

  /**
   * mount [options] <source> <target>
   *
   * @param {string} source
   * @param {string} target
   * @param {string[]} options extra mount CLI flags
   * @returns {Promise<object>} exec result { code, stdout, stderr }
   */
  async mount(source, target, options = []) {
    const mount = this;
    const args = [...options, source, target];
    return mount.exec(mount.options.paths.mount, args);
  }

  /**
   * mount --bind [options] <source> <target>
   *
   * @param {string} source
   * @param {string} target
   * @param {string[]} options extra mount CLI flags
   * @returns {Promise<object>} exec result { code, stdout, stderr }
   */
  async bindMount(source, target, options = []) {
    const mount = this;
    const args = ["--bind", ...options, source, target];
    return mount.exec(mount.options.paths.mount, args);
  }

  /**
   * umount [options] <target>
   *
   * @param {string} target device or mountpoint
   * @param {string[]} options extra umount CLI flags
   * @returns {Promise<boolean>} true on success or when already unmounted
   */
  async umount(target, options = []) {
    const mount = this;
    const args = [...options, target];

    try {
      await mount.exec(mount.options.paths.umount, args);
    } catch (err) {
      // exit code 32: "not mounted" — treat as already done (idempotent)
      if (err.code == 32) {
        return true;
      }
      throw err;
    }
    return true;
  }

  /**
   * Spawn a command (optionally via sudo) and collect its output.
   *
   * Resolves/rejects with { code, signal, stdout, stderr, timeout }.
   * Rejects on any non-zero exit, on signal death (code === null), or
   * when the per-command timeout fires.
   *
   * @param {string} command
   * @param {string[]} args
   * @param {object} [options] spawn options; options.timeout / killSignal honored
   * @returns {Promise<object>}
   */
  exec(command, args, options = {}) {
    const mount = this;
    args = args || [];

    if (mount.options.sudo) {
      args.unshift(command);
      command = mount.options.paths.sudo;
    }
    console.log("executing mount command: %s %s", command, args.join(" "));

    // bug fix: fall back to the instance-level timeout configured in the
    // constructor — previously it was set but never applied, so commands
    // could hang forever unless every caller passed options.timeout
    const timeout_ms =
      options.timeout != null ? options.timeout : mount.options.timeout;

    const child = mount.options.executor.spawn(command, args, options);

    let stdout = "";
    let stderr = "";
    let didTimeout = false;
    let timer;
    if (timeout_ms) {
      timer = setTimeout(() => {
        didTimeout = true;
        child.kill(options.killSignal || "SIGTERM");
      }, timeout_ms);
    }

    return new Promise((resolve, reject) => {
      child.stdout.on("data", function(data) {
        stdout = stdout + data;
      });

      child.stderr.on("data", function(data) {
        stderr = stderr + data;
      });

      child.on("close", function(code, signal) {
        if (timer) {
          clearTimeout(timer);
        }
        const result = { code, signal, stdout, stderr, timeout: didTimeout };
        // bug fix: a signal-killed child closes with code === null, which is
        // falsy — the original `if (code)` treated timeouts/kills as success
        if (didTimeout || code !== 0) {
          reject(result);
        } else {
          resolve(result);
        }
      });
    });
  }
}

module.exports.Mount = Mount;

// ---------------------------------------------------------------------------
// src/utils/ssh.js
// ---------------------------------------------------------------------------
const Client = require("ssh2").Client;

/**
 * Minimal promise-based wrapper around the ssh2 Client.
 *
 * options:
 *   connection - ssh2 connection config (host, user, keys, debug flag, ...)
 *   logger     - logger with a .silly() method; defaults to console
 */
class SshClient {
  constructor(options = {}) {
    this.options = options;
    this.options.connection = this.options.connection || {};
    if (this.options.logger) {
      this.logger = this.options.logger;
    } else {
      this.logger = console;
    }
  }

  /**
   * Build a command line from the name and given args
   * TODO: escape the arguments
   *
   * @param {string} name
   * @param {string[]} args
   * @returns {string}
   */
  buildCommand(name, args = []) {
    // bug fix: do not mutate the caller's args array (original unshift did)
    return [name, ...args].join(" ");
  }

  debug() {
    this.logger.silly(...arguments);
  }

  /**
   * Run a command on the remote host.
   *
   * @param {string} command full command line to execute
   * @param {object} options ssh2 exec options
   * @param {object} [stream_proxy] optional EventEmitter-like proxy that
   *   receives close/stdout/stderr events and may emit "kill" to abort
   * @returns {Promise<{stderr, stdout, code, signal}>}
   */
  async exec(command, options = {}, stream_proxy = null) {
    const client = this;
    return new Promise((resolve, reject) => {
      const conn = new Client();

      // ssh2 expects `debug` to be a callback; translate a boolean flag
      if (client.options.connection.debug == true) {
        client.options.connection.debug = function(msg) {
          client.debug(msg);
        };
      }

      conn
        .on("error", function(err) {
          client.debug("Client :: error");
          reject(err);
        })
        .on("ready", function() {
          client.debug("Client :: ready");
          conn.exec(command, options, function(err, stream) {
            if (err) {
              // bug fix: the original fell through after reject() and
              // dereferenced the undefined `stream`; also close the
              // connection so it does not leak
              conn.end();
              return reject(err);
            }
            let stderr;
            let stdout;
            stream
              .on("close", function(code, signal) {
                client.debug(
                  "Stream :: close :: code: " + code + ", signal: " + signal
                );
                if (stream_proxy) {
                  stream_proxy.emit("close", ...arguments);
                }
                resolve({ stderr, stdout, code, signal });
                conn.end();
              })
              .on("data", function(data) {
                client.debug("STDOUT: " + data);
                if (stream_proxy) {
                  stream_proxy.stdout.emit("data", ...arguments);
                }
                if (stdout == undefined) {
                  stdout = "";
                }
                stdout = stdout.concat(data);
              })
              .stderr.on("data", function(data) {
                client.debug("STDERR: " + data);
                if (stream_proxy) {
                  stream_proxy.stderr.emit("data", ...arguments);
                }
                if (stderr == undefined) {
                  stderr = "";
                }
                stderr = stderr.concat(data);
              });
          });
        })
        .connect(client.options.connection);

      if (stream_proxy) {
        stream_proxy.on("kill", signal => {
          conn.end();
        });
      }
    });
  }
}

module.exports.SshClient = SshClient;
= "/sbin/zfs"; + } + + if (!options.paths.sudo) { + options.paths.sudo = "/usr/bin/sudo"; + } + + if (!options.timeout) { + options.timeout = 10 * 60 * 1000; + } + + if (!options.executor) { + options.executor = { + spawn: cp.spawn + }; + } + + zb.DEFAULT_ZPOOL_LIST_PROPERTIES = [ + "name", + "size", + "allocated", + "free", + "cap", + "health", + "altroot" + ]; + + zb.DEFAULT_ZFS_LIST_PROPERTIES = [ + "name", + "used", + "avail", + "refer", + "type", + "mountpoint" + ]; + + zb.helpers = { + zfsErrorStr: function(error, stderr) { + if (!error) return null; + + if (error.killed) return "Process killed due to timeout."; + + return error.message || (stderr ? stderr.toString() : ""); + }, + + zfsError: function(error, stderr) { + return new Error(zb.helpers.zfsErrorStr(error, stderr)); + }, + + parseTabSeperatedTable: function(data) { + if (!data) { + return []; + } + + const lines = data.trim().split("\n"); + const rows = []; + + for (let i = 0, numLines = lines.length; i < numLines; i++) { + if (lines[i]) { + rows.push(lines[i].split("\t")); + } + } + + return rows; + }, + + /* + * Parse the output of `zfs get ...`, invoked by zfs.get below. The output has + * the form: + * + * + * + * and those fields are tab-separated. 
+ */ + parsePropertyList: function(data) { + if (!data) { + return {}; + } + + const lines = data.trim().split("\n"); + const properties = {}; + + lines.forEach(function(line) { + const fields = line.split("\t"); + if (!properties[fields[0]]) properties[fields[0]] = {}; + properties[fields[0]][fields[1]] = { + value: fields[2], + received: fields[3], + source: fields[4] + }; + }); + + return properties; + }, + + listTableToPropertyList: function(properties, data) { + const entries = []; + data.forEach(row => { + let entry = {}; + properties.forEach((value, index) => { + entry[value] = row[index]; + }); + entries.push(entry); + }); + + return entries; + }, + + extractSnapshotName: function(datasetName) { + return datasetName.substring(datasetName.indexOf("@") + 1); + }, + + extractDatasetName: function(datasetName) { + if (datasetName.includes("@")) { + return datasetName.substring(0, datasetName.indexOf("@")); + } + + return datasetName; + }, + + isZfsSnapshot: function(snapshotName) { + return snapshotName.includes("@"); + }, + + extractPool: function(datasetName) { + const parts = datasetName.split("/"); + return parts[0]; + }, + + extractParentDatasetName: function(datasetName) { + const parts = datasetName.split("/"); + parts.pop(); + return parts.join("/"); + }, + + extractLeafName: function(datasetName) { + return datasetName.split("/").pop(); + }, + + isPropertyValueSet: function(value) { + if ( + value === undefined || + value === null || + value == "" || + value == "-" + ) { + return false; + } + + return true; + }, + + generateZvolSize: function(capacity_bytes, block_size) { + block_size = "" + block_size; + block_size = block_size.toLowerCase(); + switch (block_size) { + case "512": + block_size = 512; + break; + case "1024": + case "1k": + block_size = 1024; + break; + case "2048": + case "2k": + block_size = 2048; + break; + case "4096": + case "4k": + block_size = 4096; + break; + case "8192": + case "8k": + block_size = 8192; + break; + case "16384": 
+ case "16k": + block_size = 16384; + break; + case "32768": + case "32k": + block_size = 32768; + break; + case "65536": + case "64k": + block_size = 65536; + break; + case "131072": + case "128k": + block_size = 131072; + break; + } + + capacity_bytes = Number(capacity_bytes); + let result = block_size * Math.round(capacity_bytes / block_size); + if (result < capacity_bytes) + result = Number(result) + Number(block_size); + + return result; + } + }; + + zb.zpool = { + /** + * zpool add [-fn] pool vdev ... + * + * @param {*} pool + * @param {*} vdevs + */ + add: function(pool, vdevs) { + // -f force + // -n noop + }, + + /** + * zpool attach [-f] pool device new_device + * + * @param {*} pool + * @param {*} device + * @param {*} new_device + */ + attach: function(pool, device, new_device) { + // -f Forces use of new_device, even if its appears to be in use. + }, + + /** + * zpool checkpoint [-d, --discard] pool + * + * @param {*} pool + */ + checkpoint: function(pool) {}, + + /** + * zpool clear [-F [-n]] pool [device] + * + * @param {*} pool + * @param {*} device + */ + clear: function(pool, device) {}, + + /** + * zpool create [-fnd] [-o property=value] ... [-O + * file-system-property=value] ... [-m mountpoint] [-R root] [-t + * tempname] pool vdev ... + * + * This allows fine-grained control and exposes all features of the + * zpool create command, including log devices, cache devices, and hot spares. + * The input is an object of the form produced by the disklayout library. 
+ */ + create: function(pool, options) { + if (arguments.length != 2) + throw Error("Invalid arguments, 2 arguments required"); + + return new Promise((resolve, reject) => { + let args = []; + args.push("create"); + + if (options.force) args.push("-f"); + if (options.noop) args.push("-n"); + if (options.disableFeatures) args.push("-d"); + if (options.properties) { + for (const [key, value] of Object.entries(options.properties)) { + args.push("-o"); + args.push(`${key}=${value}`); + } + } + if (options.fsProperties) { + for (const [key, value] of Object.entries(options.fsProperties)) { + args.push("-O"); + args.push(`${key}=${value}`); + } + } + if (options.mountpoint) + args = args.concat(["-m", options.mountpoint]); + if (options.root) args = args.concat(["-R", options.root]); + if (options.tempname) args = args.concat(["-t", options.tempname]); + + args.push(pool); + options.vdevs.forEach(function(vdev) { + if (vdev.type) args.push(vdev.type); + if (vdev.devices) { + vdev.devices.forEach(function(dev) { + args.push(dev.name); + }); + } else { + args.push(vdev.name); + } + }); + + if (options.spares) { + args.push("spare"); + options.spares.forEach(function(dev) { + args.push(dev.name); + }); + } + + if (options.logs) { + args.push("log"); + options.logs.forEach(function(dev) { + args.push(dev.name); + }); + } + + if (options.cache) { + args.push("cache"); + options.cache.forEach(function(dev) { + args.push(dev.name); + }); + } + + zb.exec( + zb.options.paths.zpool, + args, + { timeout: zb.options.timeout }, + function(error, stdout, stderr) { + if (error) return reject(stderr); + return resolve(stdout); + } + ); + }); + }, + + /** + * zpool destroy [-f] pool + * + * @param {*} pool + */ + destroy: function(pool) { + if (arguments.length != 1) throw Error("Invalid arguments"); + + return new Promise((resolve, reject) => { + let args = []; + args.push("destroy"); + if (options.force) args.push("-f"); + args.push(pool); + + zb.exec( + zb.options.paths.zpool, + args, 
+ { timeout: zb.options.timeout }, + function(error, stdout, stderr) { + if (error) return reject(stderr); + return resolve(stdout); + } + ); + }); + }, + + /** + * zpool detach pool device + * + * @param {*} pool + * @param {*} device + */ + detach: function(pool, device) { + if (arguments.length != 2) throw Error("Invalid arguments"); + + return new Promise((resolve, reject) => { + let args = []; + args.push("detach"); + args.push(pool); + args.push(device); + + zb.exec( + zb.options.paths.zpool, + args, + { timeout: zb.options.timeout }, + function(error, stdout, stderr) { + if (error) return reject(stderr); + return resolve(stdout); + } + ); + }); + }, + + /** + * zpool export [-f] pool ... + * + * @param {*} pool + */ + export: function(pool) { + if (arguments.length != 2) throw Error("Invalid arguments"); + + return new Promise((resolve, reject) => { + let args = []; + args.push("export"); + if (options.force) args.push("-f"); + if (Array.isArray(pool)) { + pool.forEach(item => { + args.push(item); + }); + } else { + args.push(pool); + } + + zb.exec( + zb.options.paths.zpool, + args, + { timeout: zb.options.timeout }, + function(error, stdout, stderr) { + if (error) return reject(stderr); + return resolve(stdout); + } + ); + }); + }, + + /** + * zpool get [-Hp] [-o field[,...]] all | property[,...] pool ... + */ + get: function() {}, + + /** + * zpool history [-il] [pool] ... 
+ * + * @param {*} pool + */ + history: function(pool) { + return new Promise((resolve, reject) => { + let args = []; + args.push("history"); + if (options.internal) args.push("-i"); + if (options.longFormat) args.push("-l"); + if (Array.isArray(pool)) { + pool.forEach(item => { + args.push(item); + }); + } else { + args.push(pool); + } + + zb.exec( + zb.options.paths.zpool, + args, + { timeout: zb.options.timeout }, + function(error, stdout, stderr) { + if (error) return reject(stderr); + return resolve(stdout); + } + ); + }); + }, + + /** + * zpool import [-d dir | -c cachefile] [-D] + * + * zpool import [-o mntopts] [-o property=value] ... [-d dir | -c cachefile] + * [-D] [-f] [-m] [-N] [-R root] [-F [-n]] -a + * + * zpool import [-o mntopts] [-o property=value] ... [-d dir | -c cachefile] + * [-D] [-f] [-m] [-N] [-R root] [-t] [-F [-n]] pool | id [newpool] + * + * + * + * @param {*} options + */ + import: function(options = {}) { + return new Promise((resolve, reject) => { + let args = []; + args.push("import"); + if (options.dir) args = args.concat(["-d", options.dir]); + if (options.cachefile) args = args.concat(["-c", options.cachefile]); + if (options.destroyed) args.push("-D"); + + zb.exec( + zb.options.paths.zpool, + args, + { timeout: zb.options.timeout }, + function(error, stdout, stderr) { + if (error) return reject(stderr); + return resolve(stdout); + } + ); + }); + }, + + /** + * zpool iostat [-T d|u] [-v] [pool] ... [interval [count]] + * + * @param {*} options + */ + iostat: function(options = {}) {}, + + /** + * zpool labelclear [-f] device + * + * @param {*} device + */ + labelclear: function(device) {}, + + /** + * zpool list [-Hpv] [-o property[,...]] [-T d|u] [pool] ... 
[inverval + * [count]] + * + * @param {*} pool + * @param {*} options + */ + list: function(pool, properties, options = {}) { + if (!(arguments.length >= 1)) throw Error("Invalid arguments"); + if (!properties) properties = zb.DEFAULT_ZPOOL_LIST_PROPERTIES; + + return new Promise((resolve, reject) => { + let args = []; + args.push("list"); + if (!("parse" in options)) options.parse = true; + if (!("parseable" in options)) options.parsable = true; + if (options.parseable || options.parse) args.push("-Hp"); + if (options.verbose) args.push("-v"); + if (properties) { + if (Array.isArray(properties)) { + if (properties.length == 0) { + properties = zb.DEFAULT_ZPOOL_LIST_PROPERTIES; + } + args.push("-o"); + args.push(properties.join(",")); + } else { + args.push("-o"); + args.push(properties); + } + } + if (options.timestamp) args = args.concat(["-T", options.timestamp]); + if (pool) { + if (Array.isArray(pool)) { + pool.forEach(item => { + args.push(item); + }); + } else { + args.push(pool); + } + } + if (options.interval) args.push(options.interval); + if (options.count) args.push(options.count); + + zb.exec( + zb.options.paths.zpool, + args, + { timeout: zb.options.timeout }, + function(error, stdout, stderr) { + if (error) return reject(stderr); + if (options.parse) { + let data = zb.helpers.parseTabSeperatedTable(stdout); + let indexed = zb.helpers.listTableToPropertyList( + properties, + data + ); + return resolve({ + properties, + data, + indexed + }); + } + return resolve({ properties, data: stdout }); + } + ); + }); + }, + + /** + * zpool offline [-t] pool device ... 
+ * + * @param {*} pool + * @param {*} device + * @param {*} options + */ + offline: function(pool, device, options = {}) { + return new Promise((resolve, reject) => { + let args = []; + args.push("offline"); + if (options.temporary) args.push("-t"); + args.push(pool); + args.push(device); + + zb.exec( + zb.options.paths.zpool, + args, + { timeout: zb.options.timeout }, + function(error, stdout, stderr) { + if (error) return reject(stderr); + return resolve(stdout); + } + ); + }); + }, + + /** + * zpool online [-e] pool device ... + * + * @param {*} pool + * @param {*} device + * @param {*} options + */ + online: function(pool, device, options = {}) { + return new Promise((resolve, reject) => { + let args = []; + args.push("online"); + if (options.expand) args.push("-e"); + args.push(pool); + args.push(device); + + zb.exec( + zb.options.paths.zpool, + args, + { timeout: zb.options.timeout }, + function(error, stdout, stderr) { + if (error) return reject(stderr); + return resolve(stdout); + } + ); + }); + }, + + /** + * zpool reguid pool + * + * @param {*} pool + */ + reguid: function(pool) { + return new Promise((resolve, reject) => { + let args = []; + args.push("reguid"); + args.push(pool); + + zb.exec( + zb.options.paths.zpool, + args, + { timeout: zb.options.timeout }, + function(error, stdout, stderr) { + if (error) return reject(stderr); + return resolve(stdout); + } + ); + }); + }, + + /** + * zpool remove [-np] pool device ... 
+ * + * zpool remove -s pool + * + * @param {*} pool + * @param {*} device + */ + remove: function(pool, device, options = {}) { + return new Promise((resolve, reject) => { + let args = []; + args.push("remove"); + if (options.noop) args.push("-n"); + if (options.parsable) args.push("-p"); + if (options.stop) args.push("-s"); + args.push(pool); + if (device) { + args.push(device); + } + + zb.exec( + zb.options.paths.zpool, + args, + { timeout: zb.options.timeout }, + function(error, stdout, stderr) { + if (error) return reject(stderr); + return resolve(stdout); + } + ); + }); + }, + + /** + * zpool reopen pool + * + * @param {*} pool + */ + reopen: function(pool) { + return new Promise((resolve, reject) => { + let args = []; + args.push("reopen"); + args.push(pool); + + zb.exec( + zb.options.paths.zpool, + args, + { timeout: zb.options.timeout }, + function(error, stdout, stderr) { + if (error) return reject(stderr); + return resolve(stdout); + } + ); + }); + }, + + /** + * zpool replace [-f] pool device [new_device] + * + * @param {*} pool + * @param {*} device + * @param {*} new_device + */ + replace: function(pool, device, new_device) { + return new Promise((resolve, reject) => { + let args = []; + args.push("replace"); + if (options.force) args.push("-f"); + args.push(pool); + args.push(device); + if (new_device) { + args.push(new_device); + } + + zb.exec( + zb.options.paths.zpool, + args, + { timeout: zb.options.timeout }, + function(error, stdout, stderr) { + if (error) return reject(stderr); + return resolve(stdout); + } + ); + }); + }, + + /** + * zpool scrub [-s | -p] pool ... 
+ * + * @param {*} pool + */ + scrub: function(pool) { + return new Promise((resolve, reject) => { + let args = []; + args.push("scrub"); + if (options.stop) args.push("-s"); + if (options.pause) args.push("-p"); + if (Array.isArray(pool)) { + pool.forEach(item => { + args.push(item); + }); + } else { + args.push(pool); + } + + zb.exec( + zb.options.paths.zpool, + args, + { timeout: zb.options.timeout }, + function(error, stdout, stderr) { + if (error) return reject(stderr); + return resolve(stdout); + } + ); + }); + }, + + /** + * zpool set property=value pool + * + * @param {*} pool + * @param {*} property + * @param {*} value + */ + set: function(pool, property, value) { + return new Promise((resolve, reject) => { + let args = []; + args.push("set"); + args.push(`${property}=${value}`); + args.push(pool); + + zb.exec( + zb.options.paths.zpool, + args, + { timeout: zb.options.timeout }, + function(error, stdout, stderr) { + if (error) return reject(stderr); + return resolve(stdout); + } + ); + }); + }, + + /** + * zpool split [-n] [-R altroot] [-o mntopts] [-o property=value] pool + * newpool [device ...] + * + * @param {*} pool + * @param {*} newpool + * @param {*} device + */ + split: function(pool, newpool, device) {}, + + /** + * zpool status [-vx] [-T d|u] [pool] ... 
[interval [count]] + */ + status: function(pool, options = {}) { + return new Promise((resolve, reject) => { + let args = []; + if (!("parse" in options)) options.parse = true; + args.push("status"); + if (options.verbose) args.push("-v"); + if (options.exhibiting) args.push("-x"); + if (options.timestamp) args = args.concat(["-T", options.timestamp]); + if (pool) { + if (Array.isArray(pool)) { + pool.forEach(item => { + args.push(item); + }); + } else { + args.push(pool); + } + } + if (options.interval) args.push(options.interval); + if (options.count) args.push(options.count); + + zb.exec( + zb.options.paths.zpool, + args, + { timeout: zb.options.timeout }, + function(error, stdout, stderr) { + if (options.parse) { + stdout = stdout.trim(); + if (error || stdout == "no pools available\n") { + return resolve("UNKNOWN"); + } + + const lines = stdout.split("\n"); + for (var i = 0; i < lines.length; i++) { + if (lines[i].trim().substr(0, 5) === "state") { + return resolve(lines[i].trim().substr(7)); + } + } + return resolve("UNKNOWN"); + } else { + if (error) return reject(stderr); + return resolve(stdout); + } + } + ); + }); + }, + + /** + * zpool upgrade [-v] + * + * zpool upgrade [-V version] -a | pool ... + * + * @param {*} pool + */ + upgrade: function(pool) { + return new Promise((resolve, reject) => { + let args = []; + args.push("upgrade"); + if (options.version) args = args.concat(["-V", options.version]); + if (options.all) args.push("-a"); + if (pool) { + if (Array.isArray(pool)) { + pool.forEach(item => { + args.push(item); + }); + } else { + args.push(pool); + } + } + + zb.exec( + zb.options.paths.zpool, + args, + { timeout: zb.options.timeout }, + function(error, stdout, stderr) { + if (error) return reject(stderr); + return resolve(stdout); + } + ); + }); + } + }; + + zb.zfs = { + /** + * zfs create [-pu] [-o property=value]... filesystem + * zfs create [-ps] [-b blocksize] [-o property=value]... 
-V size volume + * + * @param {*} dataset + * @param {*} options + */ + create: function(dataset, options = {}) { + if (!(arguments.length >= 1)) throw new (Error("Invalid arguments"))(); + + return new Promise((resolve, reject) => { + const idempotent = + "idempotent" in options + ? options.idempotent + : "idempotent" in zb.options + ? zb.options.idempotent + : false; + + let args = []; + args.push("create"); + if (options.parents) args.push("-p"); + if (options.unmounted) args.push("-u"); + if (options.blocksize) args = args.concat(["-b", options.blocksize]); + if (options.properties) { + for (const [key, value] of Object.entries(options.properties)) { + args.push("-o"); + args.push(`${key}=${value}`); + } + } + if (options.size) args = args.concat(["-V", options.size]); + args.push(dataset); + + zb.exec( + zb.options.paths.zfs, + args, + { timeout: zb.options.timeout }, + function(error, stdout, stderr) { + if ( + error && + !(idempotent && stderr.includes("dataset already exists")) + ) + return reject(zb.helpers.zfsError(error, stderr)); + return resolve(stdout); + } + ); + }); + }, + + /** + * zfs destroy [-fnpRrv] filesystem|volume + * zfs destroy [-dnpRrv] snapshot[%snapname][,...] + * zfs destroy filesystem|volume#bookmark + * + * + * @param {*} dataset + * @param {*} options + */ + destroy: function(dataset, options = {}) { + if (!(arguments.length >= 1)) throw Error("Invalid arguments"); + + return new Promise((resolve, reject) => { + const idempotent = + "idempotent" in options + ? options.idempotent + : "idempotent" in zb.options + ? 
zb.options.idempotent + : false; + + let args = []; + args.push("destroy"); + if (!("parseable" in options)) options.parseable = true; + if (options.recurse) args.push("-r"); + if (options.dependents) args.push("-R"); + if (options.force) args.push("-f"); + if (options.noop) args.push("-n"); + if (options.parseable) args.push("-p"); + if (options.verbose) args.push("-v"); + if (options.defer) args.push("-d"); + args.push(dataset); + + zb.exec( + zb.options.paths.zfs, + args, + { timeout: zb.options.timeout }, + function(error, stdout, stderr) { + if ( + error && + !( + idempotent && + (stderr.includes("dataset does not exist") || + stderr.includes("could not find any snapshots to destroy")) + ) + ) + return reject(zb.helpers.zfsError(error, stderr)); + return resolve(stdout); + } + ); + }); + }, + + /** + * zfs snapshot|snap [-r] [-o property=value]... + * filesystem@snapname|volume@snapname + * filesystem@snapname|volume@snapname... + * + * @param {*} dataset + * @param {*} options + */ + snapshot: function(dataset, options = {}) { + if (!(arguments.length >= 1)) throw Error("Invalid arguments"); + + return new Promise((resolve, reject) => { + const idempotent = + "idempotent" in options + ? options.idempotent + : "idempotent" in zb.options + ? 
zb.options.idempotent + : false; + + let args = []; + args.push("snapshot"); + if (options.recurse) args.push("-r"); + if (options.properties) { + for (const [key, value] of Object.entries(options.properties)) { + args.push("-o"); + args.push(`${key}=${value}`); + } + } + if (Array.isArray(dataset)) { + dataset = dataset.join(" "); + } + args.push(dataset); + + zb.exec( + zb.options.paths.zfs, + args, + { timeout: zb.options.timeout }, + function(error, stdout, stderr) { + if ( + error && + !(idempotent && stderr.includes("dataset already exists")) + ) + return reject(zb.helpers.zfsError(error, stderr)); + return resolve(stdout); + } + ); + }); + }, + + /** + * zfs rollback [-rRf] snapshot + * + * @param {*} dataset + * @param {*} options + */ + rollback: function(dataset, options = {}) { + if (!(arguments.length >= 1)) throw Error("Invalid arguments"); + + return new Promise((resolve, reject) => { + let args = []; + args.push("rollback"); + if (options.recent) args.push("-r"); + if (options.dependents) args.push("-R"); + if (options.force) args.push("-f"); + args.push(dataset); + + zb.exec( + zb.options.paths.zfs, + args, + { timeout: zb.options.timeout }, + function(error, stdout, stderr) { + /** + * cannot rollback to 'foo/bar/baz@foobar': more recent snapshots or bookmarks exist + * use '-r' to force deletion of the following snapshots and bookmarks: + */ + if (error) return reject(zb.helpers.zfsError(error, stderr)); + return resolve(stdout); + } + ); + }); + }, + + /** + * zfs clone [-p] [-o property=value]... snapshot filesystem|volume + * + * @param {*} snapshot + * @param {*} dataset + * @param {*} options + */ + clone: function(snapshot, dataset, options = {}) { + if (!(arguments.length >= 2)) throw Error("Invalid arguments"); + + return new Promise((resolve, reject) => { + const idempotent = + "idempotent" in options + ? options.idempotent + : "idempotent" in zb.options + ? 
zb.options.idempotent + : false; + + let args = []; + args.push("clone"); + if (options.parents) args.push("-p"); + if (options.properties) { + for (const [key, value] of Object.entries(options.properties)) { + args.push("-o"); + args.push(`${key}=${value}`); + } + } + args.push(snapshot); + args.push(dataset); + + zb.exec( + zb.options.paths.zfs, + args, + { timeout: zb.options.timeout }, + function(error, stdout, stderr) { + if ( + error && + !(idempotent && stderr.includes("dataset already exists")) + ) + return reject(zb.helpers.zfsError(error, stderr)); + return resolve(stdout); + } + ); + }); + }, + + /** + * /bin/sh -c "zfs send [] | zfs receive [] + * + * @param {*} source + * @param {*} send_options + * @param {*} target + * @param {*} receive_options + */ + send_receive(source, send_options = [], target, receive_options = []) { + if (arguments.length < 4) throw Error("Invalid arguments"); + + return new Promise((resolve, reject) => { + let args = ["-c"]; + let command = []; + command = command.concat(["zfs", "send"]); + command = command.concat(send_options); + command.push(source); + + command.push("|"); + + command = command.concat(["zfs", "receive"]); + command = command.concat(receive_options); + command.push(target); + + args.push("'" + command.join(" ") + "'"); + + zb.exec("/bin/sh", args, { timeout: zb.options.timeout }, function( + error, + stdout, + stderr + ) { + if (error) return reject(zb.helpers.zfsError(error, stderr)); + return resolve(stdout); + }); + }); + }, + + /** + * zfs promote clone-filesystem + * + * @param {*} dataset + */ + promote: function(dataset) { + if (arguments.length != 1) throw Error("Invalid arguments"); + + return new Promise((resolve, reject) => { + let args = []; + args.push("promote"); + args.push(dataset); + + zb.exec( + zb.options.paths.zfs, + args, + { timeout: zb.options.timeout }, + function(error, stdout, stderr) { + if (error) return reject(zb.helpers.zfsError(error, stderr)); + return resolve(stdout); + } + 
); + }); + }, + + /** + * zfs rename [-f] filesystem|volume|snapshot filesystem|volume|snapshot + * zfs rename [-f] -p filesystem|volume filesystem|volume + * zfs rename -u [-p] filesystem filesystem + * zfs rename -r snapshot snapshot + * + * @param {*} source + * @param {*} target + * @param {*} options + */ + rename: function(source, target, options = {}) { + if (!(arguments.length >= 2)) throw Error("Invalid arguments"); + + return new Promise((resolve, reject) => { + let args = []; + args.push("rename"); + if (options.parents) args.push("-p"); + if (options.unmounted) args.push("-u"); + if (options.force) args.push("-f"); + if (options.recurse) args.push("-r"); + args.push(source); + args.push(target); + + zb.exec( + zb.options.paths.zfs, + args, + { timeout: zb.options.timeout }, + function(error, stdout, stderr) { + if (error) return reject(zb.helpers.zfsError(error, stderr)); + return resolve(stdout); + } + ); + }); + }, + + /** + * zfs list [-r|-d depth] [-Hp] [-o property[,property]...] [-t + * type[,type]...] [-s property]... [-S property]... + * filesystem|volume|snapshot... 
+ * + * @param {*} dataset + * @param {*} options + */ + list: function(dataset, properties, options = {}) { + if (!(arguments.length >= 1)) throw Error("Invalid arguments"); + if (!properties) properties = zb.DEFAULT_ZFS_LIST_PROPERTIES; + + return new Promise((resolve, reject) => { + let args = []; + args.push("list"); + if (!("parse" in options)) options.parse = true; + if (!("parseable" in options)) options.parsable = true; + if (options.recurse) args.push("-r"); + if (options.depth) args = args.concat(["-d", options.depth]); + if (options.parseable || options.parse) args.push("-Hp"); + if (options.types) { + let types; + if (Array.isArray(options.types)) { + types = options.types.join(","); + } else { + types = options.types; + } + args = args.concat(["-t", types]); + } + + if (properties) { + if (Array.isArray(properties)) { + if (properties.length == 0) { + properties = zb.DEFAULT_ZFS_LIST_PROPERTIES; + } + args.push("-o"); + args.push(properties.join(",")); + } else { + args.push("-o"); + args.push(properties); + } + } + args.push(dataset); + + zb.exec( + zb.options.paths.zfs, + args, + { timeout: zb.options.timeout }, + function(error, stdout, stderr) { + if (error) return reject(zb.helpers.zfsError(error, stderr)); + if (options.parse) { + let data = zb.helpers.parseTabSeperatedTable(stdout); + let indexed = zb.helpers.listTableToPropertyList( + properties, + data + ); + return resolve({ + properties, + data, + indexed + }); + } + return resolve({ properties, data: stdout }); + } + ); + }); + }, + + /** + * zfs set property=value [property=value]... 
filesystem|volume|snapshot + * + * @param {*} dataset + * @param {*} properties + */ + set: function(dataset, properties) { + if (arguments.length != 2) throw Error("Invalid arguments"); + + return new Promise((resolve, reject) => { + if (!Object.keys(properties).length) { + resolve(); + return; + } + + let args = []; + args.push("set"); + + if (properties) { + for (const [key, value] of Object.entries(properties)) { + args.push(`${key}=${value}`); + } + } + args.push(dataset); + + zb.exec( + zb.options.paths.zfs, + args, + { timeout: zb.options.timeout }, + function(error, stdout, stderr) { + if (error) return reject(zb.helpers.zfsError(error, stderr)); + return resolve(stdout); + } + ); + }); + }, + + /** + * zfs get [-r|-d depth] [-Hp] [-o all | field[,field]...] [-t + * type[,type]...] [-s source[,source]...] all | property[,property]... + * filesystem|volume|snapshot|bookmark... + * + * -o options: name,property,value,received,source - default name,property,value,source + * -t options: filesystem, snapshot, volume - default all + * -s options: local,default,inherited,temporary,received,none - default all + * + * @param {*} dataset + * @param {*} properties + */ + get: function(dataset, properties = "all", options = {}) { + if (!(arguments.length >= 2)) throw Error("Invalid arguments"); + if (!properties) properties = "all"; + if (Array.isArray(properties) && !properties.length > 0) + properties = "all"; + + return new Promise((resolve, reject) => { + let args = []; + args.push("get"); + if (!("parse" in options)) options.parse = true; + if (!("parseable" in options)) options.parsable = true; + if (options.recurse) args.push("-r"); + if (options.depth) args.concat(["-d", options.depth]); + if (options.parseable || options.parse) args.push("-Hp"); + if (options.parse) + args = args.concat([ + "-o", + ["name", "property", "value", "received", "source"] + ]); + if (options.fields && !options.parse) { + let fields; + if (Array.isArray(options.fields)) { + fields = 
options.fields.join(","); + } else { + fields = options.fields; + } + + args = args.concat(["-o", fields]); + } + if (options.types) { + let types; + if (Array.isArray(options.types)) { + types = options.types.join(","); + } else { + types = options.types; + } + args = args.concat(["-t", types]); + } + if (options.sources) { + let sources; + if (Array.isArray(options.sources)) { + sources = options.sources.join(","); + } else { + sources = options.sources; + } + args = args.concat(["-s", sources]); + } + + if (properties) { + if (Array.isArray(properties)) { + if (properties.length > 0) { + args.push(properties.join(",")); + } else { + args.push("all"); + } + } else { + args.push(properties); + } + } else { + args.push("all"); + } + args.push(dataset); + + zb.exec( + zb.options.paths.zfs, + args, + { timeout: zb.options.timeout }, + function(error, stdout, stderr) { + if (error) return reject(zb.helpers.zfsError(error, stderr)); + if (options.parse) { + return resolve(zb.helpers.parsePropertyList(stdout)); + } + return resolve(stdout); + } + ); + }); + }, + + /** + * zfs inherit [-rS] property filesystem|volume|snapshot... 
+ * + * @param {*} dataset + * @param {*} property + */ + inherit: function(dataset, property) { + if (arguments.length != 2) throw Error("Invalid arguments"); + + return new Promise((resolve, reject) => { + let args = []; + args.push("inherit"); + if (options.recurse) args.push("-r"); + if (options.received) args.push("-S"); + args.push(property); + args.push(dataset); + + zb.exec( + zb.options.paths.zfs, + args, + { timeout: zb.options.timeout }, + function(error, stdout, stderr) { + if (error) return reject(zb.helpers.zfsError(error, stderr)); + return resolve(stdout); + } + ); + }); + }, + + /** + * zfs remap filesystem|volume + * + * @param {*} dataset + */ + remap: function(dataset) { + if (arguments.length != 1) throw Error("Invalid arguments"); + + return new Promise((resolve, reject) => { + let args = []; + args.push("remap"); + args.push(dataset); + + zb.exec( + zb.options.paths.zfs, + args, + { timeout: zb.options.timeout }, + function(error, stdout, stderr) { + if (error) return reject(zb.helpers.zfsError(error, stderr)); + return resolve(stdout); + } + ); + }); + }, + + /** + * zfs upgrade [-v] + * zfs upgrade [-r] [-V version] -a | filesystem + * + * @param {*} dataset + */ + upgrade: function(options = {}, dataset) { + return new Promise((resolve, reject) => { + let args = []; + args.push("upgrade"); + if (options.versions) args.push("-v"); + if (options.recurse) args.push("-r"); + if (options.version) args = args.concat(["-V", options.version]); + if (options.all) args = args.push("-a"); + if (dataset) { + args.push(dataset); + } + + zb.exec( + zb.options.paths.zfs, + args, + { timeout: zb.options.timeout }, + function(error, stdout, stderr) { + if (error) return reject(zb.helpers.zfsError(error, stderr)); + return resolve(stdout); + } + ); + }); + } + }; + } + + /** + * Should be a matching interface for spawn roughly + * + */ + exec() { + const zb = this; + let command = arguments[0]; + let args, options, callback, timeout; + let stdout = ""; + 
let stderr = ""; + switch (arguments.length) { + case 1: + break; + case 2: + callback = arguments[arguments.length - 1]; + break; + case 3: + callback = arguments[arguments.length - 1]; + args = arguments[arguments.length - 2]; + break; + case 4: + callback = arguments[arguments.length - 1]; + options = arguments[arguments.length - 2]; + args = arguments[arguments.length - 3]; + break; + } + + if (zb.options.sudo) { + args = args || []; + args.unshift(command); + command = zb.options.paths.sudo; + } + + const child = zb.options.executor.spawn(command, args, options); + + let didTimeout = false; + if (options && options.timeout) { + timeout = setTimeout(() => { + didTimeout = true; + child.kill(options.killSignal || "SIGTERM"); + }, options.timeout); + } + + if (callback) { + child.stdout.on("data", function(data) { + stdout = stdout + data; + }); + + child.stderr.on("data", function(data) { + stderr = stderr + data; + }); + + child.on("close", function(error) { + if (timeout) { + clearTimeout(timeout); + } + if (error) { + if (didTimeout) { + error.killed = true; + } + callback(zb.helpers.zfsError(error, stderr), stdout, stderr); + } + callback(null, stdout, stderr); + }); + } + + return child; + } +} +exports.Zetabyte = Zetabyte; + +class ZfsSshProcessManager { + constructor(client) { + this.client = client; + } + + /** + * Build a command line from the name and given args + * TODO: escape the arguments + * + * @param {*} name + * @param {*} args + */ + buildCommand(name, args = []) { + args.unshift(name); + return args.join(" "); + } + + /** + * https://nodejs.org/api/child_process.html#child_process_child_process_spawn_command_args_options + * + * should return something similar to a child_process that handles the following: + * - child.stdout.on('data') + * - child.stderr.on('data') + * - child.on('close') + * - child.kill() + */ + spawn() { + const client = this.client; + + //client.debug("ZfsProcessManager spawn", this); + + // Create an eventEmitter object 
+ var stdout = new events.EventEmitter(); + var stderr = new events.EventEmitter(); + var proxy = new events.EventEmitter(); + + proxy.stdout = stdout; + proxy.stderr = stderr; + proxy.kill = function(signal = "SIGTERM") { + proxy.emit("kill", signal); + }; + + const command = this.buildCommand(arguments[0], arguments[1]); + + client.debug("ZfsProcessManager arguments: " + JSON.stringify(arguments)); + client.logger.verbose("ZfsProcessManager command: " + command); + + client.exec(command, {}, proxy).catch(err => { + proxy.stderr.emit("data", err.message); + proxy.emit("close", 1, "SIGQUIT"); + }); + + return proxy; + } +} +exports.ZfsSshProcessManager = ZfsSshProcessManager;