Skip to content
This repository was archived by the owner on Mar 22, 2018. It is now read-only.

Commit 195993d

Browse files
author
Kubernetes Submit Queue
authored
Merge pull request #45345 from codablock/storageclass_fstype
Automatic merge from submit-queue (batch tested with PRs 45345, 49470, 49407, 49448, 49486) Support "fstype" parameter in dynamically provisioned PVs This PR is a replacement for kubernetes/kubernetes#40805. I was not able to push fixes and rebases to the original branch as I don't have access to the Github organization anymore. I assume the PR will need a new "ok to test" **ORIGINAL PR DESCRIPTION** **What this PR does / why we need it**: This PR allows specifying the desired FSType when dynamically provisioning volumes with storage classes. The FSType can now be set as a parameter: ```yaml kind: StorageClass apiVersion: storage.k8s.io/v1beta1 metadata: name: test provisioner: kubernetes.io/azure-disk parameters: fstype: xfs ``` **Which issue this PR fixes** *(optional, in `fixes #<issue number>(, fixes #<issue_number>, ...)` format, will close that issue when PR gets merged)*: fixes #37801 **Special notes for your reviewer**: The PR also implicitly adds checks for unsupported parameters. **Release note**: ```release-note Support specifying FSType in StorageClass ```
2 parents 12e7415 + 7fd9776 commit 195993d

3 files changed

Lines changed: 17 additions & 15 deletions

File tree

pkg/volume/cinder/cinder.go

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -32,7 +32,7 @@ import (
3232
"k8s.io/kubernetes/pkg/cloudprovider/providers/rackspace"
3333
"k8s.io/kubernetes/pkg/util/keymutex"
3434
"k8s.io/kubernetes/pkg/util/mount"
35-
"k8s.io/kubernetes/pkg/util/strings"
35+
kstrings "k8s.io/kubernetes/pkg/util/strings"
3636
"k8s.io/kubernetes/pkg/volume"
3737
"k8s.io/kubernetes/pkg/volume/util"
3838
"k8s.io/kubernetes/pkg/volume/util/volumehelper"
@@ -241,7 +241,7 @@ type cdManager interface {
241241
// Detaches the disk from the kubelet's host machine.
242242
DetachDisk(unmounter *cinderVolumeUnmounter) error
243243
// Creates a volume
244-
CreateVolume(provisioner *cinderVolumeProvisioner) (volumeID string, volumeSizeGB int, labels map[string]string, err error)
244+
CreateVolume(provisioner *cinderVolumeProvisioner) (volumeID string, volumeSizeGB int, labels map[string]string, fstype string, err error)
245245
// Deletes a volume
246246
DeleteVolume(deleter *cinderVolumeDeleter) error
247247
}
@@ -380,7 +380,7 @@ func makeGlobalPDName(host volume.VolumeHost, devName string) string {
380380

381381
func (cd *cinderVolume) GetPath() string {
382382
name := cinderVolumePluginName
383-
return cd.plugin.host.GetPodVolumeDir(cd.podUID, strings.EscapeQualifiedNameForDisk(name), cd.volName)
383+
return cd.plugin.host.GetPodVolumeDir(cd.podUID, kstrings.EscapeQualifiedNameForDisk(name), cd.volName)
384384
}
385385

386386
type cinderVolumeUnmounter struct {
@@ -467,7 +467,7 @@ var _ volume.Deleter = &cinderVolumeDeleter{}
467467

468468
func (r *cinderVolumeDeleter) GetPath() string {
469469
name := cinderVolumePluginName
470-
return r.plugin.host.GetPodVolumeDir(r.podUID, strings.EscapeQualifiedNameForDisk(name), r.volName)
470+
return r.plugin.host.GetPodVolumeDir(r.podUID, kstrings.EscapeQualifiedNameForDisk(name), r.volName)
471471
}
472472

473473
func (r *cinderVolumeDeleter) Delete() error {
@@ -486,7 +486,7 @@ func (c *cinderVolumeProvisioner) Provision() (*v1.PersistentVolume, error) {
486486
return nil, fmt.Errorf("invalid AccessModes %v: only AccessModes %v are supported", c.options.PVC.Spec.AccessModes, c.plugin.GetAccessModes())
487487
}
488488

489-
volumeID, sizeGB, labels, err := c.manager.CreateVolume(c)
489+
volumeID, sizeGB, labels, fstype, err := c.manager.CreateVolume(c)
490490
if err != nil {
491491
return nil, err
492492
}
@@ -508,7 +508,7 @@ func (c *cinderVolumeProvisioner) Provision() (*v1.PersistentVolume, error) {
508508
PersistentVolumeSource: v1.PersistentVolumeSource{
509509
Cinder: &v1.CinderVolumeSource{
510510
VolumeID: volumeID,
511-
FSType: "ext4",
511+
FSType: fstype,
512512
ReadOnly: false,
513513
},
514514
},

pkg/volume/cinder/cinder_test.go

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -116,8 +116,8 @@ func (fake *fakePDManager) DetachDisk(c *cinderVolumeUnmounter) error {
116116
return nil
117117
}
118118

119-
func (fake *fakePDManager) CreateVolume(c *cinderVolumeProvisioner) (volumeID string, volumeSizeGB int, labels map[string]string, err error) {
120-
return "test-volume-name", 1, nil, nil
119+
func (fake *fakePDManager) CreateVolume(c *cinderVolumeProvisioner) (volumeID string, volumeSizeGB int, labels map[string]string, fstype string, err error) {
120+
return "test-volume-name", 1, nil, "", nil
121121
}
122122

123123
func (fake *fakePDManager) DeleteVolume(cd *cinderVolumeDeleter) error {

pkg/volume/cinder/cinder_util.go

Lines changed: 9 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -158,10 +158,10 @@ func getZonesFromNodes(kubeClient clientset.Interface) (sets.String, error) {
158158
return zones, nil
159159
}
160160

161-
func (util *CinderDiskUtil) CreateVolume(c *cinderVolumeProvisioner) (volumeID string, volumeSizeGB int, volumeLabels map[string]string, err error) {
161+
func (util *CinderDiskUtil) CreateVolume(c *cinderVolumeProvisioner) (volumeID string, volumeSizeGB int, volumeLabels map[string]string, fstype string, err error) {
162162
cloud, err := c.plugin.getCloudProvider()
163163
if err != nil {
164-
return "", 0, nil, err
164+
return "", 0, nil, "", err
165165
}
166166

167167
capacity := c.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]
@@ -179,21 +179,23 @@ func (util *CinderDiskUtil) CreateVolume(c *cinderVolumeProvisioner) (volumeID s
179179
vtype = v
180180
case "availability":
181181
availability = v
182+
case volume.VolumeParameterFSType:
183+
fstype = v
182184
default:
183-
return "", 0, nil, fmt.Errorf("invalid option %q for volume plugin %s", k, c.plugin.GetPluginName())
185+
return "", 0, nil, "", fmt.Errorf("invalid option %q for volume plugin %s", k, c.plugin.GetPluginName())
184186
}
185187
}
186188
// TODO: implement PVC.Selector parsing
187189
if c.options.PVC.Spec.Selector != nil {
188-
return "", 0, nil, fmt.Errorf("claim.Spec.Selector is not supported for dynamic provisioning on Cinder")
190+
return "", 0, nil, "", fmt.Errorf("claim.Spec.Selector is not supported for dynamic provisioning on Cinder")
189191
}
190192

191193
if availability == "" {
192194
// No zone specified, choose one randomly in the same region
193195
zones, err := getZonesFromNodes(c.plugin.host.GetKubeClient())
194196
if err != nil {
195197
glog.V(2).Infof("error getting zone information: %v", err)
196-
return "", 0, nil, err
198+
return "", 0, nil, "", err
197199
}
198200
// if we did not get any zones, lets leave it blank and gophercloud will
199201
// use zone "nova" as default
@@ -205,15 +207,15 @@ func (util *CinderDiskUtil) CreateVolume(c *cinderVolumeProvisioner) (volumeID s
205207
volumeID, volumeAZ, errr := cloud.CreateVolume(name, volSizeGB, vtype, availability, c.options.CloudTags)
206208
if errr != nil {
207209
glog.V(2).Infof("Error creating cinder volume: %v", errr)
208-
return "", 0, nil, errr
210+
return "", 0, nil, "", errr
209211
}
210212
glog.V(2).Infof("Successfully created cinder volume %s", volumeID)
211213

212214
// these are needed that pod is spawning to same AZ
213215
volumeLabels = make(map[string]string)
214216
volumeLabels[kubeletapis.LabelZoneFailureDomain] = volumeAZ
215217

216-
return volumeID, volSizeGB, volumeLabels, nil
218+
return volumeID, volSizeGB, volumeLabels, fstype, nil
217219
}
218220

219221
func probeAttachedVolume() error {

0 commit comments

Comments (0)