Merge pull request #7 from Bledai/Update-path-creation-and-save-data-after-removing-PV
Update path creation and implemented possibility save data after removing PV
This commit is contained in:
commit
3623b4c2b5
19
README.md
19
README.md
|
|
@ -107,6 +107,16 @@ spec:
|
|||
|
||||
You may also want to change the PROVISIONER_NAME above from ``fuseim.pri/ifs`` to something more descriptive like ``nfs-storage``, but if you do remember to also change the PROVISIONER_NAME in the storage class definition below:
|
||||
|
||||
**Step 5: Deploying your storage class**
|
||||
|
||||
***Parameters:***
|
||||
|
||||
| Name | Description | Default |
|
||||
|------|-------------|:--------:|
|
||||
| onDelete | If it exists and has a delete value, delete the directory; if it exists and has a retain value, save the directory. | will be archived with name on the share: `archived-+volume.Name` |
|
||||
| archiveOnDelete | If it exists and has a false value, delete the directory. if `onDelete` exists, `archiveOnDelete` will be ignored. | will be archived with name on the share: `archived-+volume.Name` |
|
||||
| pathPattern | Specifies a template for creating a directory path via PVC metadata such as labels, annotations, name or namespace. To specify metadata use `${.PVC.<metadata>}`, for example: `${.PVC.namespace}`| n/a |
|
||||
|
||||
This is `deploy/class.yaml` which defines the NFS-Client's Kubernetes Storage Class:
|
||||
|
||||
```yaml
|
||||
|
|
@ -116,11 +126,11 @@ metadata:
|
|||
name: managed-nfs-storage
|
||||
provisioner: fuseim.pri/ifs # or choose another name, must match deployment's env PROVISIONER_NAME'
|
||||
parameters:
|
||||
archiveOnDelete: "false" # When set to "false" your PVs will not be archived
|
||||
# by the provisioner upon deletion of the PVC.
|
||||
pathPattern: "${.PVC.namespace}/${.PVC.annotations.nfs.io/storage-path}" # waits for nfs.io/storage-path annotation, if not specified will accept as empty string.
|
||||
onDelete: delete
|
||||
```
|
||||
|
||||
**Step 5: Finally, test your environment!**
|
||||
**Step 6: Finally, test your environment!**
|
||||
|
||||
Now we'll test your NFS provisioner.
|
||||
|
||||
|
|
@ -138,7 +148,7 @@ kubectl delete -f deploy/test-pod.yaml -f deploy/test-claim.yaml
|
|||
|
||||
Now check the folder has been deleted.
|
||||
|
||||
**Step 6: Deploying your own PersistentVolumeClaims**. To deploy your own PVC, make sure that you have the correct `storage-class` as indicated by your `deploy/class.yaml` file.
|
||||
**Step 7: Deploying your own PersistentVolumeClaims**. To deploy your own PVC, make sure that you have the correct `storage-class` as indicated by your `deploy/class.yaml` file.
|
||||
|
||||
For example:
|
||||
|
||||
|
|
@ -149,6 +159,7 @@ metadata:
|
|||
name: test-claim
|
||||
annotations:
|
||||
volume.beta.kubernetes.io/storage-class: "managed-nfs-storage"
|
||||
nfs.io/storage-path: "test-path" # not required, depending on whether this annotation was shown in the storage class description
|
||||
spec:
|
||||
accessModes:
|
||||
- ReadWriteMany
|
||||
|
|
|
|||
|
|
@ -22,18 +22,19 @@ import (
|
|||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"k8s.io/kubernetes/pkg/apis/core/v1/helper"
|
||||
|
||||
"github.com/golang/glog"
|
||||
"k8s.io/api/core/v1"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
|
||||
storage "k8s.io/api/storage/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
"k8s.io/client-go/rest"
|
||||
"k8s.io/kubernetes/pkg/apis/core/v1/helper"
|
||||
"k8s.io/client-go/tools/clientcmd"
|
||||
"sigs.k8s.io/sig-storage-lib-external-provisioner/controller"
|
||||
)
|
||||
|
|
@ -48,6 +49,29 @@ type nfsProvisioner struct {
|
|||
path string
|
||||
}
|
||||
|
||||
// pvcMetadata bundles the PVC attributes that may be substituted into a
// storage class's pathPattern template: the basic fields (name, namespace)
// plus the PVC's labels and annotations.
type pvcMetadata struct {
	data        map[string]string
	labels      map[string]string
	annotations map[string]string
}

// pattern matches placeholders of the form ${.PVC.<field>},
// ${.PVC.labels.<key>} or ${.PVC.annotations.<key>}.
var pattern = regexp.MustCompile(`\${\.PVC\.((labels|annotations)\.(.*?)|.*?)}`)

// stringParser expands every ${.PVC.*} placeholder found in str with the
// corresponding PVC metadata value and returns the result. A placeholder
// whose key is not present in the metadata expands to the empty string.
func (meta *pvcMetadata) stringParser(str string) string {
	for _, match := range pattern.FindAllStringSubmatch(str, -1) {
		placeholder, kind, key := match[0], match[2], match[3]

		// Pick the replacement value from the map the placeholder addresses.
		var value string
		switch kind {
		case "labels":
			value = meta.labels[key]
		case "annotations":
			value = meta.annotations[key]
		default:
			value = meta.data[match[1]]
		}

		str = strings.Replace(str, placeholder, value, -1)
	}
	return str
}
|
||||
|
||||
const (
|
||||
mountPath = "/persistentvolumes"
|
||||
)
|
||||
|
|
@ -65,15 +89,31 @@ func (p *nfsProvisioner) Provision(options controller.ProvisionOptions) (*v1.Per
|
|||
|
||||
pvName := strings.Join([]string{pvcNamespace, pvcName, options.PVName}, "-")
|
||||
|
||||
metadata := &pvcMetadata{
|
||||
data: map[string]string{
|
||||
"name": pvcName,
|
||||
"namespace": pvcNamespace,
|
||||
},
|
||||
labels: options.PVC.Labels,
|
||||
annotations: options.PVC.Annotations,
|
||||
}
|
||||
|
||||
fullPath := filepath.Join(mountPath, pvName)
|
||||
path := filepath.Join(p.path, pvName)
|
||||
|
||||
pathPattern, exists := options.StorageClass.Parameters["pathPattern"]
|
||||
if exists {
|
||||
customPath := metadata.stringParser(pathPattern)
|
||||
path = filepath.Join(p.path, customPath)
|
||||
fullPath = filepath.Join(mountPath, customPath)
|
||||
}
|
||||
|
||||
glog.V(4).Infof("creating path %s", fullPath)
|
||||
if err := os.MkdirAll(fullPath, 0777); err != nil {
|
||||
return nil, errors.New("unable to create directory to provision new pv: " + err.Error())
|
||||
}
|
||||
os.Chmod(fullPath, 0777)
|
||||
|
||||
path := filepath.Join(p.path, pvName)
|
||||
|
||||
pv := &v1.PersistentVolume{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: options.PVName,
|
||||
|
|
@ -81,7 +121,7 @@ func (p *nfsProvisioner) Provision(options controller.ProvisionOptions) (*v1.Per
|
|||
Spec: v1.PersistentVolumeSpec{
|
||||
PersistentVolumeReclaimPolicy: *options.StorageClass.ReclaimPolicy,
|
||||
AccessModes: options.PVC.Spec.AccessModes,
|
||||
//MountOptions: options.MountOptions,
|
||||
// MountOptions: options.MountOptions,
|
||||
Capacity: v1.ResourceList{
|
||||
v1.ResourceName(v1.ResourceStorage): options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)],
|
||||
},
|
||||
|
|
@ -99,8 +139,9 @@ func (p *nfsProvisioner) Provision(options controller.ProvisionOptions) (*v1.Per
|
|||
|
||||
func (p *nfsProvisioner) Delete(volume *v1.PersistentVolume) error {
|
||||
path := volume.Spec.PersistentVolumeSource.NFS.Path
|
||||
pvName := filepath.Base(path)
|
||||
oldPath := filepath.Join(mountPath, pvName)
|
||||
relativePath := strings.Replace(path, p.path, "", 1)
|
||||
oldPath := filepath.Join(mountPath, relativePath)
|
||||
|
||||
if _, err := os.Stat(oldPath); os.IsNotExist(err) {
|
||||
glog.Warningf("path %s does not exist, deletion skipped", oldPath)
|
||||
return nil
|
||||
|
|
@ -110,6 +151,20 @@ func (p *nfsProvisioner) Delete(volume *v1.PersistentVolume) error {
|
|||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Determine if the "onDelete" parameter exists.
|
||||
// If it exists and has a delete value, delete the directory.
|
||||
// If it exists and has a retain value, save the directory.
|
||||
onDelete := storageClass.Parameters["onDelete"]
|
||||
switch onDelete {
|
||||
|
||||
case "delete":
|
||||
return os.RemoveAll(oldPath)
|
||||
|
||||
case "retain":
|
||||
return nil
|
||||
}
|
||||
|
||||
// Determine if the "archiveOnDelete" parameter exists.
|
||||
// If it exists and has a false value, delete the directory.
|
||||
// Otherwise, archive it.
|
||||
|
|
@ -124,10 +179,9 @@ func (p *nfsProvisioner) Delete(volume *v1.PersistentVolume) error {
|
|||
}
|
||||
}
|
||||
|
||||
archivePath := filepath.Join(mountPath, "archived-"+pvName)
|
||||
archivePath := filepath.Join(mountPath, "archived-"+volume.Name)
|
||||
glog.V(4).Infof("archiving path %s to %s", oldPath, archivePath)
|
||||
return os.Rename(oldPath, archivePath)
|
||||
|
||||
}
|
||||
|
||||
// getClassForVolume returns StorageClass
|
||||
|
|
|
|||
Loading…
Reference in New Issue