From ff68db79b056fd3f0e11f718e0892c919974293e Mon Sep 17 00:00:00 2001 From: Jan Safranek Date: Fri, 15 Jul 2016 14:56:02 +0200 Subject: [PATCH 01/11] Add API objects --- pkg/apis/extensions/install/install.go | 1 + pkg/apis/extensions/register.go | 2 + pkg/apis/extensions/types.go | 42 +++++ pkg/apis/extensions/v1beta1/register.go | 2 + pkg/apis/extensions/v1beta1/types.go | 44 ++++++ pkg/apis/extensions/validation/validation.go | 49 ++++++ .../extensions/validation/validation_test.go | 73 +++++++++ .../unversioned/generated_expansion.go | 2 + pkg/client/unversioned/extensions.go | 5 + pkg/client/unversioned/storageclasses.go | 87 +++++++++++ pkg/client/unversioned/storageclasses_test.go | 147 ++++++++++++++++++ .../testclient/fake_storage_classes.go | 74 +++++++++ .../unversioned/testclient/testclient.go | 4 + pkg/kubectl/resource_printer.go | 29 ++++ pkg/master/master.go | 5 + pkg/registry/storageclass/doc.go | 17 ++ pkg/registry/storageclass/etcd/etcd.go | 77 +++++++++ pkg/registry/storageclass/etcd/etcd_test.go | 129 +++++++++++++++ pkg/registry/storageclass/strategy.go | 98 ++++++++++++ pkg/registry/storageclass/strategy_test.go | 69 ++++++++ 20 files changed, 956 insertions(+) create mode 100644 pkg/client/unversioned/storageclasses.go create mode 100644 pkg/client/unversioned/storageclasses_test.go create mode 100644 pkg/client/unversioned/testclient/fake_storage_classes.go create mode 100644 pkg/registry/storageclass/doc.go create mode 100644 pkg/registry/storageclass/etcd/etcd.go create mode 100644 pkg/registry/storageclass/etcd/etcd_test.go create mode 100644 pkg/registry/storageclass/strategy.go create mode 100644 pkg/registry/storageclass/strategy_test.go diff --git a/pkg/apis/extensions/install/install.go b/pkg/apis/extensions/install/install.go index 1279dfb1f7de2..5fe3abbd72ef0 100644 --- a/pkg/apis/extensions/install/install.go +++ b/pkg/apis/extensions/install/install.go @@ -93,6 +93,7 @@ func newRESTMapper(externalVersions 
[]unversioned.GroupVersion) meta.RESTMapper rootScoped := sets.NewString( "PodSecurityPolicy", "ThirdPartyResource", + "StorageClass", ) ignoredKinds := sets.NewString() diff --git a/pkg/apis/extensions/register.go b/pkg/apis/extensions/register.go index 48ba33d466245..480a38d97923e 100644 --- a/pkg/apis/extensions/register.go +++ b/pkg/apis/extensions/register.go @@ -75,5 +75,7 @@ func addKnownTypes(scheme *runtime.Scheme) { &PodSecurityPolicyList{}, &NetworkPolicy{}, &NetworkPolicyList{}, + &StorageClass{}, + &StorageClassList{}, ) } diff --git a/pkg/apis/extensions/types.go b/pkg/apis/extensions/types.go index 8c73db72ea3a9..7488bcc7bb0ab 100644 --- a/pkg/apis/extensions/types.go +++ b/pkg/apis/extensions/types.go @@ -899,3 +899,45 @@ type NetworkPolicyList struct { Items []NetworkPolicy `json:"items"` } + +// +genclient=true +// +nonNamespaced=true + +// StorageClass describes the parameters for a class of storage for +// which PersistentVolumes can be dynamically provisioned. +// +// StorageClasses are non-namespaced; the name of the storage class +// according to etcd is in ObjectMeta.Name. +type StorageClass struct { + unversioned.TypeMeta `json:",inline"` + api.ObjectMeta `json:"metadata,omitempty"` + + // ProvisionerType indicates the type of the provisioner. + ProvisionerType string `json:"provisionerType,omitempty"` + + // ProvisionerParameters holds the parameters for the provisioner that should + // create volumes of this storage class. + ProvisionerParameters map[string]string `json:"provisionerParameters,omitempty"` +} + +// ProvisionerType describes the type of a provisioner. + +// List of provisioners for StorageClass.ProvisionerType that are compiled into +// Kubernetes. Note that StorageClass.ProvisionerType is a free form string and +// it is not limited to values listed here. 
+const ( + ProvisionerTypeAWSEBS string = "kubernetes.io/aws-ebs" + ProvisionerTypeGCEPD string = "kubernetes.io/gce-pd" + ProvisionerTypeCinder string = "kubernetes.io/cinder" +) + +// StorageClassList is a collection of storage classes. +type StorageClassList struct { + unversioned.TypeMeta `json:",inline"` + // Standard list metadata + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + unversioned.ListMeta `json:"metadata,omitempty"` + + // Items is the list of StorageClasses + Items []StorageClass `json:"items"` +} diff --git a/pkg/apis/extensions/v1beta1/register.go b/pkg/apis/extensions/v1beta1/register.go index 91c1c48687401..0dce6f07ce6f9 100644 --- a/pkg/apis/extensions/v1beta1/register.go +++ b/pkg/apis/extensions/v1beta1/register.go @@ -63,6 +63,8 @@ func addKnownTypes(scheme *runtime.Scheme) { &PodSecurityPolicyList{}, &NetworkPolicy{}, &NetworkPolicyList{}, + &StorageClass{}, + &StorageClassList{}, ) // Add the watch version that applies versionedwatch.AddToGroupVersion(scheme, SchemeGroupVersion) diff --git a/pkg/apis/extensions/v1beta1/types.go b/pkg/apis/extensions/v1beta1/types.go index 55719fdbae851..d621df5b19b1f 100644 --- a/pkg/apis/extensions/v1beta1/types.go +++ b/pkg/apis/extensions/v1beta1/types.go @@ -1196,3 +1196,47 @@ type NetworkPolicyList struct { // Items is a list of schema objects. Items []NetworkPolicy `json:"items" protobuf:"bytes,2,rep,name=items"` } + +// +genclient=true +// +nonNamespaced=true + +// StorageClass describes the parameters for a class of storage for +// which PersistentVolumes can be dynamically provisioned. +// +// StorageClasses are non-namespaced; the name of the storage class +// according to etcd is in ObjectMeta.Name. +type StorageClass struct { + unversioned.TypeMeta `json:",inline"` + // Standard object's metadata. 
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + v1.ObjectMeta `json:"metadata,omitempty"` + + // ProvisionerType indicates the type of the provisioner. + ProvisionerType string `json:"provisionerType,omitempty"` + + // ProvisionerParameters holds the parameters for the provisioner that should + // create volumes of this storage class. + ProvisionerParameters map[string]string `json:"provisionerParameters,omitempty"` +} + +// ProvisionerType describes the type of a provisioner. + +// List of provisioners for StorageClass.ProvisionerType that are compiled into +// Kubernetes. Note that StorageClass.ProvisionerType is a free form string and +// it is not limited to values listed here. +const ( + ProvisionerTypeAWSEBS string = "kubernetes.io/aws-ebs" + ProvisionerTypeGCEPD string = "kubernetes.io/gce-pd" + ProvisionerTypeCinder string = "kubernetes.io/cinder" +) + +// StorageClassList is a collection of storage classes. +type StorageClassList struct { + unversioned.TypeMeta `json:",inline"` + // Standard list metadata + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + unversioned.ListMeta `json:"metadata,omitempty"` + + // Items is the list of StorageClasses + Items []StorageClass `json:"items"` +} diff --git a/pkg/apis/extensions/validation/validation.go b/pkg/apis/extensions/validation/validation.go index c13d747c8d050..73bd12702a867 100644 --- a/pkg/apis/extensions/validation/validation.go +++ b/pkg/apis/extensions/validation/validation.go @@ -741,3 +741,52 @@ func ValidateNetworkPolicyUpdate(update, old *extensions.NetworkPolicy) field.Er } return allErrs } + +// ValidateStorageClass validates a StorageClass. 
+func ValidateStorageClass(storageClass *extensions.StorageClass) field.ErrorList { + allErrs := apivalidation.ValidateObjectMeta(&storageClass.ObjectMeta, false, apivalidation.NameIsDNSSubdomain, field.NewPath("metadata")) + allErrs = append(allErrs, validateProvisionerType(storageClass.ProvisionerType, field.NewPath("provisionerType"))...) + allErrs = append(allErrs, validateProvisionerParameters(storageClass.ProvisionerParameters, field.NewPath("provisionerParameters"))...) + + return allErrs +} + +// ValidateStorageClassUpdate tests if an update to a StorageClass is valid. +func ValidateStorageClassUpdate(storageClass, oldStorageClass *extensions.StorageClass) field.ErrorList { + allErrs := apivalidation.ValidateObjectMetaUpdate(&storageClass.ObjectMeta, &oldStorageClass.ObjectMeta, field.NewPath("metadata")) + allErrs = append(allErrs, validateProvisionerType(storageClass.ProvisionerType, field.NewPath("provisionerType"))...) + allErrs = append(allErrs, validateProvisionerParameters(storageClass.ProvisionerParameters, field.NewPath("provisionerParameters"))...) 
+ + return allErrs +} + +func validateProvisionerType(provisionerType string, fldPath *field.Path) field.ErrorList { + // provisionerType must be a valid qualified name + allErrs := field.ErrorList{} + if len(provisionerType) > 0 { + for _, msg := range validation.IsQualifiedName(strings.ToLower(provisionerType)) { + allErrs = append(allErrs, field.Invalid(fldPath, provisionerType, msg)) + } + } + return allErrs +} + +const maxProvisionerParameterSize = 256 * (1 << 10) // 256 kB +func validateProvisionerParameters(params map[string]string, fldPath *field.Path) field.ErrorList { + // provisionerParameter keys must be valid qualified names + // provisionerParameter must be smaller than 256 kB + var totalSize int64 + allErrs := field.ErrorList{} + + for k, v := range params { + for _, msg := range validation.IsQualifiedName(strings.ToLower(k)) { + allErrs = append(allErrs, field.Invalid(fldPath, k, msg)) + } + totalSize += (int64)(len(k)) + (int64)(len(v)) + } + + if totalSize > maxProvisionerParameterSize { + allErrs = append(allErrs, field.TooLong(fldPath, "", maxProvisionerParameterSize)) + } + return allErrs +} diff --git a/pkg/apis/extensions/validation/validation_test.go b/pkg/apis/extensions/validation/validation_test.go index 6dc72c8e355ca..e316756bd5b2c 100644 --- a/pkg/apis/extensions/validation/validation_test.go +++ b/pkg/apis/extensions/validation/validation_test.go @@ -1929,3 +1929,76 @@ func newBool(val bool) *bool { *p = val return p } + +func TestValidateStorageClass(t *testing.T) { + successCases := []extensions.StorageClass{ + { + // Empty type and parameters + ObjectMeta: api.ObjectMeta{Name: "foo"}, + ProvisionerType: "", + ProvisionerParameters: map[string]string{}, + }, + { + // nil parameters + ObjectMeta: api.ObjectMeta{Name: "foo"}, + ProvisionerType: "", + }, + { + // some parameters + ObjectMeta: api.ObjectMeta{Name: "foo"}, + ProvisionerType: "kubernetes.io/foo-provisioner", + ProvisionerParameters: map[string]string{ + 
"kubernetes.io/foo-parameter": "free/form/string", + "foo-parameter": "free-form-string", + "foo-parameter2": "{\"embeded\": \"json\", \"with\": {\"structures\":\"inside\"}}", + }, + }, + } + + // Success cases are expected to pass validation. + for k, v := range successCases { + if errs := ValidateStorageClass(&v); len(errs) != 0 { + t.Errorf("Expected success for %d, got %v", k, errs) + } + } + + // generate a map longer than maxProvisionerParameterSize + longParameters := make(map[string]string) + totalSize := 0 + for totalSize < maxProvisionerParameterSize { + k := fmt.Sprintf("param/%d", totalSize) + v := fmt.Sprintf("value-%d", totalSize) + longParameters[k] = v + totalSize = totalSize + len(k) + len(v) + } + + errorCases := map[string]extensions.StorageClass{ + "namespace is present": { + ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: "bar"}, + ProvisionerType: "kubernetes.io/foo-provisioner", + }, + "invalid provisionerType": { + ObjectMeta: api.ObjectMeta{Name: "foo"}, + ProvisionerType: "kubernetes.io/invalid/provisioner", + }, + "invalid parameter name": { + ObjectMeta: api.ObjectMeta{Name: "foo"}, + ProvisionerType: "kubernetes.io/foo", + ProvisionerParameters: map[string]string{ + "invalid/parameter/name": "value", + }, + }, + "too long parameters": { + ObjectMeta: api.ObjectMeta{Name: "foo"}, + ProvisionerType: "kubernetes.io/foo", + ProvisionerParameters: longParameters, + }, + } + + // Error cases are not expected to pass validation. 
+ for testName, storageClass := range errorCases { + if errs := ValidateStorageClass(&storageClass); len(errs) == 0 { + t.Errorf("Expected failure for test: %s", testName) + } + } +} diff --git a/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/generated_expansion.go b/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/generated_expansion.go index dd1ed2903e36e..b795deed1920c 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/generated_expansion.go +++ b/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/generated_expansion.go @@ -29,3 +29,5 @@ type PodSecurityPolicyExpansion interface{} type ThirdPartyResourceExpansion interface{} type ReplicaSetExpansion interface{} + +type StorageClassExpansion interface{} diff --git a/pkg/client/unversioned/extensions.go b/pkg/client/unversioned/extensions.go index 39b3408776158..c83846f924165 100644 --- a/pkg/client/unversioned/extensions.go +++ b/pkg/client/unversioned/extensions.go @@ -37,6 +37,7 @@ type ExtensionsInterface interface { ThirdPartyResourceNamespacer ReplicaSetsNamespacer PodSecurityPoliciesInterface + StorageClassesInterface } // ExtensionsClient is used to interact with experimental Kubernetes features. @@ -82,6 +83,10 @@ func (c *ExtensionsClient) ReplicaSets(namespace string) ReplicaSetInterface { return newReplicaSets(c, namespace) } +func (c *ExtensionsClient) StorageClasses() StorageClassInterface { + return newStorageClasses(c) +} + // NewExtensions creates a new ExtensionsClient for the given config. This client // provides access to experimental Kubernetes features. 
// Features of Extensions group are not supported and may be changed or removed in diff --git a/pkg/client/unversioned/storageclasses.go b/pkg/client/unversioned/storageclasses.go new file mode 100644 index 0000000000000..7c5c0b4e72dd9 --- /dev/null +++ b/pkg/client/unversioned/storageclasses.go @@ -0,0 +1,87 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unversioned + +import ( + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/apis/extensions" + "k8s.io/kubernetes/pkg/watch" +) + +type StorageClassesInterface interface { + StorageClasses() StorageClassInterface +} + +// StorageClassInterface has methods to work with StorageClass resources. +type StorageClassInterface interface { + List(opts api.ListOptions) (*extensions.StorageClassList, error) + Get(name string) (*extensions.StorageClass, error) + Create(storageClass *extensions.StorageClass) (*extensions.StorageClass, error) + Update(storageClass *extensions.StorageClass) (*extensions.StorageClass, error) + Delete(name string) error + Watch(opts api.ListOptions) (watch.Interface, error) +} + +// storageClasses implements StorageClassInterface +type storageClasses struct { + client *ExtensionsClient +} + +func newStorageClasses(c *ExtensionsClient) *storageClasses { + return &storageClasses{c} +} + +func (c *storageClasses) List(opts api.ListOptions) (result *extensions.StorageClassList, err error) { + result = &extensions.StorageClassList{} + err = c.client.Get(). 
+ Resource("storageclasses"). + VersionedParams(&opts, api.ParameterCodec). + Do(). + Into(result) + + return result, err +} + +func (c *storageClasses) Get(name string) (result *extensions.StorageClass, err error) { + result = &extensions.StorageClass{} + err = c.client.Get().Resource("storageClasses").Name(name).Do().Into(result) + return +} + +func (c *storageClasses) Create(storageClass *extensions.StorageClass) (result *extensions.StorageClass, err error) { + result = &extensions.StorageClass{} + err = c.client.Post().Resource("storageClasses").Body(storageClass).Do().Into(result) + return +} + +func (c *storageClasses) Update(storageClass *extensions.StorageClass) (result *extensions.StorageClass, err error) { + result = &extensions.StorageClass{} + err = c.client.Put().Resource("storageClasses").Name(storageClass.Name).Body(storageClass).Do().Into(result) + return +} + +func (c *storageClasses) Delete(name string) error { + return c.client.Delete().Resource("storageClasses").Name(name).Do().Error() +} + +func (c *storageClasses) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.client.Get(). + Prefix("watch"). + Resource("storageClasses"). + VersionedParams(&opts, api.ParameterCodec). + Watch() +} diff --git a/pkg/client/unversioned/storageclasses_test.go b/pkg/client/unversioned/storageclasses_test.go new file mode 100644 index 0000000000000..8368fb59d48d5 --- /dev/null +++ b/pkg/client/unversioned/storageclasses_test.go @@ -0,0 +1,147 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unversioned_test + +import ( + "testing" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/testapi" + "k8s.io/kubernetes/pkg/apis/extensions" + "k8s.io/kubernetes/pkg/client/unversioned/testclient/simple" +) + +func getStorageClassResourceName() string { + return "storageclasses" +} + +func TestListStorageClasses(t *testing.T) { + c := &simple.Client{ + Request: simple.Request{ + Method: "GET", + Path: testapi.Extensions.ResourcePath(getStorageClassResourceName(), "", ""), + }, + Response: simple.Response{StatusCode: 200, + Body: &extensions.StorageClassList{ + Items: []extensions.StorageClass{ + { + ObjectMeta: api.ObjectMeta{ + Name: "foo", + Labels: map[string]string{ + "foo": "bar", + "name": "baz", + }, + }, + ProvisionerType: "aaa", + }, + }, + }, + }, + } + receivedSCList, err := c.Setup(t).Extensions().StorageClasses().List(api.ListOptions{}) + c.Validate(t, receivedSCList, err) +} + +func TestGetStorageClass(t *testing.T) { + c := &simple.Client{ + Request: simple.Request{Method: "GET", Path: testapi.Extensions.ResourcePath(getStorageClassResourceName(), "", "foo"), Query: simple.BuildQueryValues(nil)}, + Response: simple.Response{ + StatusCode: 200, + Body: &extensions.StorageClass{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + Labels: map[string]string{ + "foo": "bar", + "name": "baz", + }, + }, + ProvisionerType: "aaa", + }, + }, + } + receivedSC, err := c.Setup(t).Extensions().StorageClasses().Get("foo") + c.Validate(t, receivedSC, err) +} + +func TestGetStorageClassWithNoName(t *testing.T) { + c := &simple.Client{Error: true} + receivedSC, err := c.Setup(t).Extensions().StorageClasses().Get("") + if (err != nil) && (err.Error() != simple.NameRequiredError) { + t.Errorf("Expected error: %v, but got %v", simple.NameRequiredError, err) + } + + c.Validate(t, receivedSC, err) +} + +func TestUpdateStorageClass(t *testing.T) { + 
requestSC := &extensions.StorageClass{ + ObjectMeta: api.ObjectMeta{Name: "foo", ResourceVersion: "1"}, + ProvisionerType: "aaa", + } + c := &simple.Client{ + Request: simple.Request{Method: "PUT", Path: testapi.Extensions.ResourcePath(getStorageClassResourceName(), "", "foo"), Query: simple.BuildQueryValues(nil)}, + Response: simple.Response{ + StatusCode: 200, + Body: &extensions.StorageClass{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + Labels: map[string]string{ + "foo": "bar", + "name": "baz", + }, + }, + ProvisionerType: "aaa", + }, + }, + } + receivedSC, err := c.Setup(t).Extensions().StorageClasses().Update(requestSC) + c.Validate(t, receivedSC, err) +} + +func TestDeleteStorageClass(t *testing.T) { + c := &simple.Client{ + Request: simple.Request{Method: "DELETE", Path: testapi.Extensions.ResourcePath(getStorageClassResourceName(), "", "foo"), Query: simple.BuildQueryValues(nil)}, + Response: simple.Response{StatusCode: 200}, + } + err := c.Setup(t).Extensions().StorageClasses().Delete("foo") + c.Validate(t, nil, err) +} + +func TestCreateStorageClass(t *testing.T) { + requestSC := &extensions.StorageClass{ + ObjectMeta: api.ObjectMeta{Name: "foo"}, + ProvisionerType: "aaa", + } + c := &simple.Client{ + Request: simple.Request{Method: "POST", Path: testapi.Extensions.ResourcePath(getStorageClassResourceName(), "", ""), Body: requestSC, Query: simple.BuildQueryValues(nil)}, + Response: simple.Response{ + StatusCode: 200, + Body: &extensions.StorageClass{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + Labels: map[string]string{ + "foo": "bar", + "name": "baz", + }, + }, + ProvisionerType: "aaa", + }, + }, + } + receivedSC, err := c.Setup(t).Extensions().StorageClasses().Create(requestSC) + c.Validate(t, receivedSC, err) +} diff --git a/pkg/client/unversioned/testclient/fake_storage_classes.go b/pkg/client/unversioned/testclient/fake_storage_classes.go new file mode 100644 index 0000000000000..0a6bb65f4b433 --- /dev/null +++ 
b/pkg/client/unversioned/testclient/fake_storage_classes.go @@ -0,0 +1,74 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package testclient + +import ( + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/apis/extensions" + kclientlib "k8s.io/kubernetes/pkg/client/unversioned" + "k8s.io/kubernetes/pkg/watch" +) + +// FakeStorageClasses implements StorageClassInterface. Meant to be embedded into a struct to get a default +// implementation. This makes faking out just the method you want to test easier. +type FakeStorageClasses struct { + Fake *FakeExperimental +} + +// Ensure statically that FakeStorageClasses implements StorageClassInterface. 
+var _ kclientlib.StorageClassInterface = &FakeStorageClasses{} + +func (c *FakeStorageClasses) Get(name string) (*extensions.StorageClass, error) { + obj, err := c.Fake.Invokes(NewGetAction("storageclasses", "", name), &extensions.StorageClass{}) + if obj == nil { + return nil, err + } + return obj.(*extensions.StorageClass), err +} + +func (c *FakeStorageClasses) List(opts api.ListOptions) (*extensions.StorageClassList, error) { + obj, err := c.Fake.Invokes(NewListAction("storageclasses", "", opts), &extensions.StorageClassList{}) + if obj == nil { + return nil, err + } + return obj.(*extensions.StorageClassList), err +} + +func (c *FakeStorageClasses) Create(np *extensions.StorageClass) (*extensions.StorageClass, error) { + obj, err := c.Fake.Invokes(NewCreateAction("storageclasses", "", np), &extensions.StorageClass{}) + if obj == nil { + return nil, err + } + return obj.(*extensions.StorageClass), err +} + +func (c *FakeStorageClasses) Update(np *extensions.StorageClass) (*extensions.StorageClass, error) { + obj, err := c.Fake.Invokes(NewUpdateAction("storageclasses", "", np), &extensions.StorageClass{}) + if obj == nil { + return nil, err + } + return obj.(*extensions.StorageClass), err +} + +func (c *FakeStorageClasses) Delete(name string) error { + _, err := c.Fake.Invokes(NewDeleteAction("storageclasses", "", name), &extensions.StorageClass{}) + return err +} + +func (c *FakeStorageClasses) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.Fake.InvokesWatch(NewWatchAction("storageclasses", "", opts)) +} diff --git a/pkg/client/unversioned/testclient/testclient.go b/pkg/client/unversioned/testclient/testclient.go index d4f1eb70c599e..ca6c71981385e 100644 --- a/pkg/client/unversioned/testclient/testclient.go +++ b/pkg/client/unversioned/testclient/testclient.go @@ -394,6 +394,10 @@ func (c *FakeExperimental) NetworkPolicies(namespace string) client.NetworkPolic return &FakeNetworkPolicies{Fake: c, Namespace: namespace} } +func (c 
*FakeExperimental) StorageClasses() client.StorageClassInterface { + return &FakeStorageClasses{Fake: c} +} + func NewSimpleFakeRbac(objects ...runtime.Object) *FakeRbac { return &FakeRbac{Fake: NewSimpleFake(objects...)} } diff --git a/pkg/kubectl/resource_printer.go b/pkg/kubectl/resource_printer.go index b1f17830dca9f..504907c69fa30 100644 --- a/pkg/kubectl/resource_printer.go +++ b/pkg/kubectl/resource_printer.go @@ -433,6 +433,7 @@ var roleColumns = []string{"NAME", "AGE"} var roleBindingColumns = []string{"NAME", "AGE"} var clusterRoleColumns = []string{"NAME", "AGE"} var clusterRoleBindingColumns = []string{"NAME", "AGE"} +var storageClassColumns = []string{"NAME", "TYPE"} // TODO: consider having 'KIND' for third party resource data var thirdPartyResourceDataColumns = []string{"NAME", "LABELS", "DATA"} @@ -510,6 +511,8 @@ func (h *HumanReadablePrinter) addDefaultHandlers() { h.Handler(clusterRoleColumns, printClusterRoleList) h.Handler(clusterRoleBindingColumns, printClusterRoleBinding) h.Handler(clusterRoleBindingColumns, printClusterRoleBindingList) + h.Handler(storageClassColumns, printStorageClass) + h.Handler(storageClassColumns, printStorageClassList) } func (h *HumanReadablePrinter) unknown(data []byte, w io.Writer) error { @@ -2004,6 +2007,32 @@ func printNetworkPolicyList(list *extensions.NetworkPolicyList, w io.Writer, opt return nil } +func printStorageClass(sc *extensions.StorageClass, w io.Writer, options PrintOptions) error { + name := sc.Name + provtype := sc.ProvisionerType + + if _, err := fmt.Fprintf(w, "%s\t%s\t", name, provtype); err != nil { + return err + } + if _, err := fmt.Fprint(w, AppendLabels(sc.Labels, options.ColumnLabels)); err != nil { + return err + } + if _, err := fmt.Fprint(w, AppendAllLabels(options.ShowLabels, sc.Labels)); err != nil { + return err + } + + return nil +} + +func printStorageClassList(scList *extensions.StorageClassList, w io.Writer, options PrintOptions) error { + for _, sc := range scList.Items { + if 
err := printStorageClass(&sc, w, options); err != nil { + return err + } + } + return nil +} + func AppendLabels(itemLabels map[string]string, columnLabels []string) string { var buffer bytes.Buffer diff --git a/pkg/master/master.go b/pkg/master/master.go index 0ff460f5972a9..85a3a36e31cd4 100644 --- a/pkg/master/master.go +++ b/pkg/master/master.go @@ -100,6 +100,7 @@ import ( serviceetcd "k8s.io/kubernetes/pkg/registry/service/etcd" ipallocator "k8s.io/kubernetes/pkg/registry/service/ipallocator" serviceaccountetcd "k8s.io/kubernetes/pkg/registry/serviceaccount/etcd" + storageclassetcd "k8s.io/kubernetes/pkg/registry/storageclass/etcd" thirdpartyresourceetcd "k8s.io/kubernetes/pkg/registry/thirdpartyresource/etcd" "k8s.io/kubernetes/pkg/registry/thirdpartyresourcedata" thirdpartyresourcedataetcd "k8s.io/kubernetes/pkg/registry/thirdpartyresourcedata/etcd" @@ -883,6 +884,10 @@ func (m *Master) getExtensionResources(c *Config) map[string]rest.Storage { if c.APIResourceConfigSource.ResourceEnabled(version.WithResource("networkpolicies")) { storage["networkpolicies"] = networkPolicyStorage } + storageClassStorage := storageclassetcd.NewREST(restOptions("storageclasses")) + if c.APIResourceConfigSource.ResourceEnabled(version.WithResource("storageclasses")) { + storage["storageclasses"] = storageClassStorage + } return storage } diff --git a/pkg/registry/storageclass/doc.go b/pkg/registry/storageclass/doc.go new file mode 100644 index 0000000000000..98bc486ead23b --- /dev/null +++ b/pkg/registry/storageclass/doc.go @@ -0,0 +1,17 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package storageclass diff --git a/pkg/registry/storageclass/etcd/etcd.go b/pkg/registry/storageclass/etcd/etcd.go new file mode 100644 index 0000000000000..2f46fd4f01045 --- /dev/null +++ b/pkg/registry/storageclass/etcd/etcd.go @@ -0,0 +1,77 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package etcd + +import ( + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/apis/extensions" + "k8s.io/kubernetes/pkg/fields" + "k8s.io/kubernetes/pkg/labels" + "k8s.io/kubernetes/pkg/registry/generic" + "k8s.io/kubernetes/pkg/registry/generic/registry" + "k8s.io/kubernetes/pkg/registry/storageclass" + "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/storage" +) + +type REST struct { + *registry.Store +} + +// NewREST returns a RESTStorage object that will work against storage classes. 
+func NewREST(opts generic.RESTOptions) *REST { + prefix := "/storageclasses" + + newListFunc := func() runtime.Object { return &extensions.StorageClassList{} } + storageInterface := opts.Decorator( + opts.Storage, + 100, + &extensions.StorageClass{}, + prefix, + storageclass.Strategy, + newListFunc, + storage.NoTriggerPublisher, + ) + + store := ®istry.Store{ + NewFunc: func() runtime.Object { return &extensions.StorageClass{} }, + NewListFunc: newListFunc, + KeyRootFunc: func(ctx api.Context) string { + return prefix + }, + KeyFunc: func(ctx api.Context, name string) (string, error) { + return registry.NoNamespaceKeyFunc(ctx, prefix, name) + }, + ObjectNameFunc: func(obj runtime.Object) (string, error) { + return obj.(*extensions.StorageClass).Name, nil + }, + PredicateFunc: func(label labels.Selector, field fields.Selector) generic.Matcher { + return storageclass.MatchStorageClasses(label, field) + }, + QualifiedResource: api.Resource("storageclasses"), + DeleteCollectionWorkers: opts.DeleteCollectionWorkers, + + CreateStrategy: storageclass.Strategy, + UpdateStrategy: storageclass.Strategy, + DeleteStrategy: storageclass.Strategy, + ReturnDeletedObject: true, + + Storage: storageInterface, + } + + return &REST{store} +} diff --git a/pkg/registry/storageclass/etcd/etcd_test.go b/pkg/registry/storageclass/etcd/etcd_test.go new file mode 100644 index 0000000000000..928d3fcf83e7b --- /dev/null +++ b/pkg/registry/storageclass/etcd/etcd_test.go @@ -0,0 +1,129 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package etcd + +import ( + "testing" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/apis/extensions" + "k8s.io/kubernetes/pkg/fields" + "k8s.io/kubernetes/pkg/labels" + "k8s.io/kubernetes/pkg/registry/generic" + "k8s.io/kubernetes/pkg/registry/registrytest" + "k8s.io/kubernetes/pkg/runtime" + etcdtesting "k8s.io/kubernetes/pkg/storage/etcd/testing" +) + +func newStorage(t *testing.T) (*REST, *etcdtesting.EtcdTestServer) { + etcdStorage, server := registrytest.NewEtcdStorage(t, extensions.GroupName) + restOptions := generic.RESTOptions{Storage: etcdStorage, Decorator: generic.UndecoratedStorage, DeleteCollectionWorkers: 1} + storageClassStorage := NewREST(restOptions) + return storageClassStorage, server +} + +func validNewStorageClass(name string) *extensions.StorageClass { + return &extensions.StorageClass{ + ObjectMeta: api.ObjectMeta{ + Name: name, + }, + ProvisionerType: "kubernetes.io/aws-ebs", + ProvisionerParameters: map[string]string{ + "foo": "bar", + }, + } +} + +func validChangedStorageClass() *extensions.StorageClass { + return validNewStorageClass("foo") +} + +func TestCreate(t *testing.T) { + storage, server := newStorage(t) + defer server.Terminate(t) + test := registrytest.New(t, storage.Store).ClusterScope() + storageClass := validNewStorageClass("foo") + storageClass.ObjectMeta = api.ObjectMeta{GenerateName: "foo"} + test.TestCreate( + // valid + storageClass, + // invalid + &extensions.StorageClass{ + ObjectMeta: api.ObjectMeta{Name: "*BadName!"}, + }, + ) +} + +func TestUpdate(t *testing.T) { + storage, server := newStorage(t) + defer server.Terminate(t) + test := registrytest.New(t, storage.Store).ClusterScope() + test.TestUpdate( + // valid + validNewStorageClass("foo"), + // updateFunc + func(obj runtime.Object) runtime.Object { + object := obj.(*extensions.StorageClass) + object.ProvisionerParameters = 
map[string]string{"bar": "baz"} + return object + }, + ) +} + +func TestDelete(t *testing.T) { + storage, server := newStorage(t) + defer server.Terminate(t) + test := registrytest.New(t, storage.Store).ClusterScope().ReturnDeletedObject() + test.TestDelete(validNewStorageClass("foo")) +} + +func TestGet(t *testing.T) { + storage, server := newStorage(t) + defer server.Terminate(t) + test := registrytest.New(t, storage.Store).ClusterScope() + test.TestGet(validNewStorageClass("foo")) +} + +func TestList(t *testing.T) { + storage, server := newStorage(t) + defer server.Terminate(t) + test := registrytest.New(t, storage.Store).ClusterScope() + test.TestList(validNewStorageClass("foo")) +} + +func TestWatch(t *testing.T) { + storage, server := newStorage(t) + defer server.Terminate(t) + test := registrytest.New(t, storage.Store).ClusterScope() + test.TestWatch( + validNewStorageClass("foo"), + // matching labels + []labels.Set{}, + // not matching labels + []labels.Set{ + {"foo": "bar"}, + }, + // matching fields + []fields.Set{ + {"metadata.name": "foo"}, + }, + // not matching fields + []fields.Set{ + {"metadata.name": "bar"}, + }, + ) +} diff --git a/pkg/registry/storageclass/strategy.go b/pkg/registry/storageclass/strategy.go new file mode 100644 index 0000000000000..a8ebac5d0a47f --- /dev/null +++ b/pkg/registry/storageclass/strategy.go @@ -0,0 +1,98 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package storageclass + +import ( + "fmt" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/apis/extensions" + "k8s.io/kubernetes/pkg/apis/extensions/validation" + "k8s.io/kubernetes/pkg/fields" + "k8s.io/kubernetes/pkg/labels" + "k8s.io/kubernetes/pkg/registry/generic" + "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/util/validation/field" +) + +// storageClassStrategy implements behavior for StorageClass objects +type storageClassStrategy struct { + runtime.ObjectTyper + api.NameGenerator +} + +// Strategy is the default logic that applies when creating and updating +// StorageClass objects via the REST API. +var Strategy = storageClassStrategy{api.Scheme, api.SimpleNameGenerator} + +func (storageClassStrategy) NamespaceScoped() bool { + return false +} + +// ResetBeforeCreate clears the Status field which is not allowed to be set by end users on creation. +func (storageClassStrategy) PrepareForCreate(obj runtime.Object) { + _ = obj.(*extensions.StorageClass) +} + +func (storageClassStrategy) Validate(ctx api.Context, obj runtime.Object) field.ErrorList { + storageClass := obj.(*extensions.StorageClass) + return validation.ValidateStorageClass(storageClass) +} + +// Canonicalize normalizes the object after validation. +func (storageClassStrategy) Canonicalize(obj runtime.Object) { +} + +func (storageClassStrategy) AllowCreateOnUpdate() bool { + return false +} + +// PrepareForUpdate sets the Status fields which is not allowed to be set by an end user updating a PV +func (storageClassStrategy) PrepareForUpdate(obj, old runtime.Object) { + _ = obj.(*extensions.StorageClass) + _ = old.(*extensions.StorageClass) +} + +func (storageClassStrategy) ValidateUpdate(ctx api.Context, obj, old runtime.Object) field.ErrorList { + errorList := validation.ValidateStorageClass(obj.(*extensions.StorageClass)) + return append(errorList, validation.ValidateStorageClassUpdate(obj.(*extensions.StorageClass), old.(*extensions.StorageClass))...) 
+} + +func (storageClassStrategy) AllowUnconditionalUpdate() bool { + return true +} + +// MatchStorageClass returns a generic matcher for a given label and field selector. +func MatchStorageClasses(label labels.Selector, field fields.Selector) generic.Matcher { + return &generic.SelectionPredicate{ + Label: label, + Field: field, + GetAttrs: func(obj runtime.Object) (labels.Set, fields.Set, error) { + cls, ok := obj.(*extensions.StorageClass) + if !ok { + return nil, nil, fmt.Errorf("given object is not of type StorageClass") + } + + return labels.Set(cls.ObjectMeta.Labels), StorageClassToSelectableFields(cls), nil + }, + } +} + +// StorageClassToSelectableFields returns a label set that represents the object +func StorageClassToSelectableFields(storageClass *extensions.StorageClass) fields.Set { + return generic.ObjectMetaFieldsSet(storageClass.ObjectMeta, false) +} diff --git a/pkg/registry/storageclass/strategy_test.go b/pkg/registry/storageclass/strategy_test.go new file mode 100644 index 0000000000000..a29433244f8d6 --- /dev/null +++ b/pkg/registry/storageclass/strategy_test.go @@ -0,0 +1,69 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package storageclass + +import ( + "testing" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/apis/extensions" +) + +func TestStorageClassStrategy(t *testing.T) { + ctx := api.NewDefaultContext() + if Strategy.NamespaceScoped() { + t.Errorf("StorageClass must not be namespace scoped") + } + if Strategy.AllowCreateOnUpdate() { + t.Errorf("StorageClass should not allow create on update") + } + + storageClass := &extensions.StorageClass{ + ObjectMeta: api.ObjectMeta{ + Name: "valid-class", + }, + ProvisionerType: "kubernetes.io/aws-ebs", + ProvisionerParameters: map[string]string{ + "foo": "bar", + }, + } + + Strategy.PrepareForCreate(storageClass) + + errs := Strategy.Validate(ctx, storageClass) + if len(errs) != 0 { + t.Errorf("unexpected error validating %v", errs) + } + + newStorageClass := &extensions.StorageClass{ + ObjectMeta: api.ObjectMeta{ + Name: "valid-class-2", + ResourceVersion: "4", + }, + ProvisionerType: "kubernetes.io/aws-ebs", + ProvisionerParameters: map[string]string{ + "foo": "bar", + }, + } + + Strategy.PrepareForUpdate(newStorageClass, storageClass) + + errs = Strategy.ValidateUpdate(ctx, newStorageClass, storageClass) + if len(errs) == 0 { + t.Errorf("Expected a validation error") + } +} From ca3fefeee243ddfe967414fb1186f66b124935ae Mon Sep 17 00:00:00 2001 From: Jan Safranek Date: Fri, 15 Jul 2016 14:56:03 +0200 Subject: [PATCH 02/11] Regenerate all API objects --- pkg/apis/extensions/types.generated.go | 838 ++++++++++++++++++ pkg/apis/extensions/v1beta1/generated.pb.go | 507 ++++++++++- pkg/apis/extensions/v1beta1/generated.proto | 28 + .../extensions/v1beta1/types.generated.go | 838 ++++++++++++++++++ pkg/apis/extensions/v1beta1/types.go | 10 +- .../v1beta1/types_swagger_doc_generated.go | 21 + .../unversioned/extensions_client.go | 5 + .../fake/fake_extensions_client.go | 4 + .../unversioned/fake/fake_storageclass.go | 109 +++ .../extensions/unversioned/storageclass.go | 141 +++ .../extensions/v1beta1/extensions_client.go | 
5 + .../v1beta1/fake/fake_extensions_client.go | 4 + .../v1beta1/fake/fake_storageclass.go | 109 +++ .../extensions/v1beta1/generated_expansion.go | 2 + .../typed/extensions/v1beta1/storageclass.go | 141 +++ 15 files changed, 2749 insertions(+), 13 deletions(-) create mode 100644 pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/fake/fake_storageclass.go create mode 100644 pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/storageclass.go create mode 100644 pkg/client/clientset_generated/release_1_4/typed/extensions/v1beta1/fake/fake_storageclass.go create mode 100644 pkg/client/clientset_generated/release_1_4/typed/extensions/v1beta1/storageclass.go diff --git a/pkg/apis/extensions/types.generated.go b/pkg/apis/extensions/types.generated.go index fd982347aed7c..de0c122b61c92 100644 --- a/pkg/apis/extensions/types.generated.go +++ b/pkg/apis/extensions/types.generated.go @@ -15508,6 +15508,725 @@ func (x *NetworkPolicyList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } +func (x *StorageClass) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = true + yyq2[1] = x.ProvisionerType != "" + yyq2[2] = len(x.ProvisionerParameters) != 0 + yyq2[3] = x.Kind != "" + yyq2[4] = x.APIVersion != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yy4 
:= &x.ObjectMeta + yy4.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy6 := &x.ObjectMeta + yy6.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym9 := z.EncBinary() + _ = yym9 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.ProvisionerType)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("provisionerType")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.ProvisionerType)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + if x.ProvisionerParameters == nil { + r.EncodeNil() + } else { + yym12 := z.EncBinary() + _ = yym12 + if false { + } else { + z.F.EncMapStringStringV(x.ProvisionerParameters, false, e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("provisionerParameters")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.ProvisionerParameters == nil { + r.EncodeNil() + } else { + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + z.F.EncMapStringStringV(x.ProvisionerParameters, false, e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, 
"") + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yym18 := z.EncBinary() + _ = yym18 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym19 := z.EncBinary() + _ = yym19 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *StorageClass) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *StorageClass) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h 
codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg2_api.ObjectMeta{} + } else { + yyv4 := &x.ObjectMeta + yyv4.CodecDecodeSelf(d) + } + case "provisionerType": + if r.TryDecodeAsNil() { + x.ProvisionerType = "" + } else { + x.ProvisionerType = string(r.DecodeString()) + } + case "provisionerParameters": + if r.TryDecodeAsNil() { + x.ProvisionerParameters = nil + } else { + yyv6 := &x.ProvisionerParameters + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + z.F.DecMapStringStringX(yyv6, false, d) + } + } + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + x.Kind = string(r.DecodeString()) + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + x.APIVersion = string(r.DecodeString()) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *StorageClass) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj10 int + var yyb10 bool + var yyhl10 bool = l >= 0 + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg2_api.ObjectMeta{} + } else { + yyv11 := &x.ObjectMeta + 
yyv11.CodecDecodeSelf(d) + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ProvisionerType = "" + } else { + x.ProvisionerType = string(r.DecodeString()) + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ProvisionerParameters = nil + } else { + yyv13 := &x.ProvisionerParameters + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + z.F.DecMapStringStringX(yyv13, false, d) + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + x.Kind = string(r.DecodeString()) + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + x.APIVersion = string(r.DecodeString()) + } + for { + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj10-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *StorageClassList) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = 
yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = true + yyq2[2] = x.Kind != "" + yyq2[3] = x.APIVersion != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yy4 := &x.ListMeta + yym5 := z.EncBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.EncExt(yy4) { + } else { + z.EncFallback(yy4) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy6 := &x.ListMeta + yym7 := z.EncBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.EncExt(yy6) { + } else { + z.EncFallback(yy6) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym9 := z.EncBinary() + _ = yym9 + if false { + } else { + h.encSliceStorageClass(([]StorageClass)(x.Items), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("items")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + h.encSliceStorageClass(([]StorageClass)(x.Items), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym12 := z.EncBinary() + _ = yym12 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + 
r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *StorageClassList) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *StorageClassList) 
codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "metadata": + if r.TryDecodeAsNil() { + x.ListMeta = pkg1_unversioned.ListMeta{} + } else { + yyv4 := &x.ListMeta + yym5 := z.DecBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.DecExt(yyv4) { + } else { + z.DecFallback(yyv4, false) + } + } + case "items": + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv6 := &x.Items + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + h.decSliceStorageClass((*[]StorageClass)(yyv6), d) + } + } + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + x.Kind = string(r.DecodeString()) + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + x.APIVersion = string(r.DecodeString()) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *StorageClassList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj10 int + var yyb10 bool + var yyhl10 bool = l >= 0 + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ListMeta = pkg1_unversioned.ListMeta{} + } else { + yyv11 := &x.ListMeta 
+ yym12 := z.DecBinary() + _ = yym12 + if false { + } else if z.HasExtensions() && z.DecExt(yyv11) { + } else { + z.DecFallback(yyv11, false) + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv13 := &x.Items + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + h.decSliceStorageClass((*[]StorageClass)(yyv13), d) + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + x.Kind = string(r.DecodeString()) + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + x.APIVersion = string(r.DecodeString()) + } + for { + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj10-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + func (x codecSelfer1234) encSliceCustomMetricTarget(v []CustomMetricTarget, e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) @@ -17989,3 +18708,122 @@ func (x codecSelfer1234) decSliceNetworkPolicy(v *[]NetworkPolicy, d *codec1978. 
*v = yyv1 } } + +func (x codecSelfer1234) encSliceStorageClass(v []StorageClass, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceStorageClass(v *[]StorageClass, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []StorageClass{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 264) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]StorageClass, yyrl1) + } + } else { + yyv1 = make([]StorageClass, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = StorageClass{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, StorageClass{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = StorageClass{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, StorageClass{}) // var yyz1 StorageClass + yyc1 = true + } + 
yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = StorageClass{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []StorageClass{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} diff --git a/pkg/apis/extensions/v1beta1/generated.pb.go b/pkg/apis/extensions/v1beta1/generated.pb.go index be3acb91d83ae..4ec34065ab340 100644 --- a/pkg/apis/extensions/v1beta1/generated.pb.go +++ b/pkg/apis/extensions/v1beta1/generated.pb.go @@ -88,6 +88,8 @@ limitations under the License. Scale ScaleSpec ScaleStatus + StorageClass + StorageClassList SubresourceReference SupplementalGroupsStrategyOptions ThirdPartyResource @@ -306,6 +308,12 @@ func (*ScaleSpec) ProtoMessage() {} func (m *ScaleStatus) Reset() { *m = ScaleStatus{} } func (*ScaleStatus) ProtoMessage() {} +func (m *StorageClass) Reset() { *m = StorageClass{} } +func (*StorageClass) ProtoMessage() {} + +func (m *StorageClassList) Reset() { *m = StorageClassList{} } +func (*StorageClassList) ProtoMessage() {} + func (m *SubresourceReference) Reset() { *m = SubresourceReference{} } func (*SubresourceReference) ProtoMessage() {} @@ -388,6 +396,8 @@ func init() { proto.RegisterType((*Scale)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.Scale") proto.RegisterType((*ScaleSpec)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.ScaleSpec") proto.RegisterType((*ScaleStatus)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.ScaleStatus") + proto.RegisterType((*StorageClass)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.StorageClass") + proto.RegisterType((*StorageClassList)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.StorageClassList") proto.RegisterType((*SubresourceReference)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.SubresourceReference") 
proto.RegisterType((*SupplementalGroupsStrategyOptions)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.SupplementalGroupsStrategyOptions") proto.RegisterType((*ThirdPartyResource)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.ThirdPartyResource") @@ -2830,6 +2840,91 @@ func (m *ScaleStatus) MarshalTo(data []byte) (int, error) { return i, nil } +func (m *StorageClass) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *StorageClass) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n72, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n72 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.ProvisionerType))) + i += copy(data[i:], m.ProvisionerType) + if len(m.ProvisionerParameters) > 0 { + for k := range m.ProvisionerParameters { + data[i] = 0x1a + i++ + v := m.ProvisionerParameters[k] + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + i = encodeVarintGenerated(data, i, uint64(mapSize)) + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(k))) + i += copy(data[i:], k) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(v))) + i += copy(data[i:], v) + } + } + return i, nil +} + +func (m *StorageClassList) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *StorageClassList) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) + n73, err := m.ListMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += 
n73 + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + func (m *SubresourceReference) Marshal() (data []byte, err error) { size := m.Size() data = make([]byte, size) @@ -2916,11 +3011,11 @@ func (m *ThirdPartyResource) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) - n72, err := m.ObjectMeta.MarshalTo(data[i:]) + n74, err := m.ObjectMeta.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n72 + i += n74 data[i] = 0x12 i++ i = encodeVarintGenerated(data, i, uint64(len(m.Description))) @@ -2958,11 +3053,11 @@ func (m *ThirdPartyResourceData) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) - n73, err := m.ObjectMeta.MarshalTo(data[i:]) + n75, err := m.ObjectMeta.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n73 + i += n75 if m.Data != nil { data[i] = 0x12 i++ @@ -2990,11 +3085,11 @@ func (m *ThirdPartyResourceDataList) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) - n74, err := m.ListMeta.MarshalTo(data[i:]) + n76, err := m.ListMeta.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n74 + i += n76 if len(m.Items) > 0 { for _, msg := range m.Items { data[i] = 0x12 @@ -3028,11 +3123,11 @@ func (m *ThirdPartyResourceList) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) - n75, err := m.ListMeta.MarshalTo(data[i:]) + n77, err := m.ListMeta.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n75 + i += n77 if len(m.Items) > 0 { for _, msg := range m.Items { data[i] = 0x12 @@ -3933,6 +4028,38 @@ func (m *ScaleStatus) Size() (n int) { return n } +func (m 
*StorageClass) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ProvisionerType) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.ProvisionerParameters) > 0 { + for k, v := range m.ProvisionerParameters { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n +} + +func (m *StorageClassList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + func (m *SubresourceReference) Size() (n int) { var l int _ = l @@ -4797,6 +4924,39 @@ func (this *ScaleStatus) String() string { }, "") return s } +func (this *StorageClass) String() string { + if this == nil { + return "nil" + } + keysForProvisionerParameters := make([]string, 0, len(this.ProvisionerParameters)) + for k := range this.ProvisionerParameters { + keysForProvisionerParameters = append(keysForProvisionerParameters, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForProvisionerParameters) + mapStringForProvisionerParameters := "map[string]string{" + for _, k := range keysForProvisionerParameters { + mapStringForProvisionerParameters += fmt.Sprintf("%v: %v,", k, this.ProvisionerParameters[k]) + } + mapStringForProvisionerParameters += "}" + s := strings.Join([]string{`&StorageClass{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_kubernetes_pkg_api_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ProvisionerType:` + fmt.Sprintf("%v", this.ProvisionerType) + `,`, + `ProvisionerParameters:` + mapStringForProvisionerParameters + `,`, + `}`, + }, "") + return s +} +func (this *StorageClassList) String() string { + if this == nil { + return "nil" + } + s := 
strings.Join([]string{`&StorageClassList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_kubernetes_pkg_api_unversioned.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "StorageClass", "StorageClass", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} func (this *SubresourceReference) String() string { if this == nil { return "nil" @@ -12929,6 +13089,337 @@ func (m *ScaleStatus) Unmarshal(data []byte) error { } return nil } +func (m *StorageClass) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StorageClass: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StorageClass: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProvisionerType", wireType) + } + var stringLen uint64 + for 
shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ProvisionerType = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProvisionerParameters", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := string(data[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue := string(data[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + if m.ProvisionerParameters == nil { + m.ProvisionerParameters = make(map[string]string) + } + m.ProvisionerParameters[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StorageClassList) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StorageClassList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StorageClassList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + 
if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, StorageClass{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *SubresourceReference) Unmarshal(data []byte) error { l := len(data) iNdEx := 0 diff --git a/pkg/apis/extensions/v1beta1/generated.proto b/pkg/apis/extensions/v1beta1/generated.proto index 46df0d6be0ce0..2cd20d3e950be 100644 --- a/pkg/apis/extensions/v1beta1/generated.proto +++ b/pkg/apis/extensions/v1beta1/generated.proto @@ -942,6 +942,34 @@ message ScaleStatus { optional 
string targetSelector = 3; } +// StorageClass describes the parameters for a class of storage for +// which PersistentVolumes can be dynamically provisioned. +// +// StorageClasses are non-namespaced; the name of the storage class +// according to etcd is in ObjectMeta.Name. +message StorageClass { + // Standard object's metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1; + + // ProvisionerType indicates the type of the provisioner. + optional string provisionerType = 2; + + // ProvisionerParameters holds the parameters for the provisioner that should + // create volumes of this storage class. + map provisionerParameters = 3; +} + +// StorageClassList is a collection of storage classes. +message StorageClassList { + // Standard list metadata + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; + + // Items is the list of StorageClasses + repeated StorageClass items = 2; +} + // SubresourceReference contains enough information to let you inspect or modify the referred subresource. 
message SubresourceReference { // Kind of the referent; More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds diff --git a/pkg/apis/extensions/v1beta1/types.generated.go b/pkg/apis/extensions/v1beta1/types.generated.go index a0220de07c89c..236c915c78228 100644 --- a/pkg/apis/extensions/v1beta1/types.generated.go +++ b/pkg/apis/extensions/v1beta1/types.generated.go @@ -20980,6 +20980,725 @@ func (x *NetworkPolicyList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } +func (x *StorageClass) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = true + yyq2[1] = x.ProvisionerType != "" + yyq2[2] = len(x.ProvisionerParameters) != 0 + yyq2[3] = x.Kind != "" + yyq2[4] = x.APIVersion != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yy4 := &x.ObjectMeta + yy4.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy6 := &x.ObjectMeta + yy6.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym9 := z.EncBinary() + _ = yym9 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, 
string(x.ProvisionerType)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("provisionerType")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.ProvisionerType)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + if x.ProvisionerParameters == nil { + r.EncodeNil() + } else { + yym12 := z.EncBinary() + _ = yym12 + if false { + } else { + z.F.EncMapStringStringV(x.ProvisionerParameters, false, e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("provisionerParameters")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.ProvisionerParameters == nil { + r.EncodeNil() + } else { + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + z.F.EncMapStringStringV(x.ProvisionerParameters, false, e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yym18 := z.EncBinary() + _ = yym18 + if false { + } else { + 
r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym19 := z.EncBinary() + _ = yym19 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *StorageClass) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *StorageClass) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + 
switch yys3 { + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg2_v1.ObjectMeta{} + } else { + yyv4 := &x.ObjectMeta + yyv4.CodecDecodeSelf(d) + } + case "provisionerType": + if r.TryDecodeAsNil() { + x.ProvisionerType = "" + } else { + x.ProvisionerType = string(r.DecodeString()) + } + case "provisionerParameters": + if r.TryDecodeAsNil() { + x.ProvisionerParameters = nil + } else { + yyv6 := &x.ProvisionerParameters + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + z.F.DecMapStringStringX(yyv6, false, d) + } + } + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + x.Kind = string(r.DecodeString()) + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + x.APIVersion = string(r.DecodeString()) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *StorageClass) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj10 int + var yyb10 bool + var yyhl10 bool = l >= 0 + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg2_v1.ObjectMeta{} + } else { + yyv11 := &x.ObjectMeta + yyv11.CodecDecodeSelf(d) + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ProvisionerType = "" + } else { + x.ProvisionerType = string(r.DecodeString()) + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ProvisionerParameters = nil + } else { + yyv13 := &x.ProvisionerParameters + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + z.F.DecMapStringStringX(yyv13, false, d) + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + x.Kind = string(r.DecodeString()) + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + x.APIVersion = string(r.DecodeString()) + } + for { + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj10-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *StorageClassList) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = true + yyq2[2] = x.Kind != "" + yyq2[3] = x.APIVersion != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 
= 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yy4 := &x.ListMeta + yym5 := z.EncBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.EncExt(yy4) { + } else { + z.EncFallback(yy4) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy6 := &x.ListMeta + yym7 := z.EncBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.EncExt(yy6) { + } else { + z.EncFallback(yy6) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym9 := z.EncBinary() + _ = yym9 + if false { + } else { + h.encSliceStorageClass(([]StorageClass)(x.Items), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("items")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + h.encSliceStorageClass(([]StorageClass)(x.Items), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym12 := z.EncBinary() + _ = yym12 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + 
yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *StorageClassList) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *StorageClassList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := 
string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "metadata": + if r.TryDecodeAsNil() { + x.ListMeta = pkg1_unversioned.ListMeta{} + } else { + yyv4 := &x.ListMeta + yym5 := z.DecBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.DecExt(yyv4) { + } else { + z.DecFallback(yyv4, false) + } + } + case "items": + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv6 := &x.Items + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + h.decSliceStorageClass((*[]StorageClass)(yyv6), d) + } + } + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + x.Kind = string(r.DecodeString()) + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + x.APIVersion = string(r.DecodeString()) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *StorageClassList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj10 int + var yyb10 bool + var yyhl10 bool = l >= 0 + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ListMeta = pkg1_unversioned.ListMeta{} + } else { + yyv11 := &x.ListMeta + yym12 := z.DecBinary() + _ = yym12 + if false { + } else if z.HasExtensions() && z.DecExt(yyv11) { + } else { + z.DecFallback(yyv11, false) + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv13 := &x.Items + yym14 := z.DecBinary() + 
_ = yym14 + if false { + } else { + h.decSliceStorageClass((*[]StorageClass)(yyv13), d) + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + x.Kind = string(r.DecodeString()) + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + x.APIVersion = string(r.DecodeString()) + } + for { + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj10-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + func (x codecSelfer1234) encSliceCustomMetricTarget(v []CustomMetricTarget, e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) @@ -23937,3 +24656,122 @@ func (x codecSelfer1234) decSliceNetworkPolicy(v *[]NetworkPolicy, d *codec1978. 
*v = yyv1 } } + +func (x codecSelfer1234) encSliceStorageClass(v []StorageClass, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceStorageClass(v *[]StorageClass, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []StorageClass{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 264) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]StorageClass, yyrl1) + } + } else { + yyv1 = make([]StorageClass, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = StorageClass{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, StorageClass{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = StorageClass{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, StorageClass{}) // var yyz1 StorageClass + yyc1 = true + } + 
yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = StorageClass{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []StorageClass{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} diff --git a/pkg/apis/extensions/v1beta1/types.go b/pkg/apis/extensions/v1beta1/types.go index d621df5b19b1f..53d64ac0a6661 100644 --- a/pkg/apis/extensions/v1beta1/types.go +++ b/pkg/apis/extensions/v1beta1/types.go @@ -1209,14 +1209,14 @@ type StorageClass struct { unversioned.TypeMeta `json:",inline"` // Standard object's metadata. // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata - v1.ObjectMeta `json:"metadata,omitempty"` + v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // ProvisionerType indicates the type of the provisioner. - ProvisionerType string `json:"provisionerType,omitempty"` + ProvisionerType string `json:"provisionerType,omitempty" protobuf:"bytes,2,opt,name=provisionerType"` // ProvisionerParameters holds the parameters for the provisioner that should // create volumes of this storage class. - ProvisionerParameters map[string]string `json:"provisionerParameters,omitempty"` + ProvisionerParameters map[string]string `json:"provisionerParameters,omitempty" protobuf:"bytes,3,rep,name=provisionerParameters"` } // ProvisionerType describes the type of a provisioner. 
@@ -1235,8 +1235,8 @@ type StorageClassList struct { unversioned.TypeMeta `json:",inline"` // Standard list metadata // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata - unversioned.ListMeta `json:"metadata,omitempty"` + unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Items is the list of StorageClasses - Items []StorageClass `json:"items"` + Items []StorageClass `json:"items" protobuf:"bytes,2,rep,name=items"` } diff --git a/pkg/apis/extensions/v1beta1/types_swagger_doc_generated.go b/pkg/apis/extensions/v1beta1/types_swagger_doc_generated.go index 182ff78c62d6d..0d0267455e641 100644 --- a/pkg/apis/extensions/v1beta1/types_swagger_doc_generated.go +++ b/pkg/apis/extensions/v1beta1/types_swagger_doc_generated.go @@ -674,6 +674,27 @@ func (ScaleStatus) SwaggerDoc() map[string]string { return map_ScaleStatus } +var map_StorageClass = map[string]string{ + "": "StorageClass describes the parameters for a class of storage for which PersistentVolumes can be dynamically provisioned.\n\nStorageClasses are non-namespaced; the name of the storage class according to etcd is in ObjectMeta.Name.", + "metadata": "Standard object's metadata. 
More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", + "provisionerType": "ProvisionerType indicates the type of the provisioner.", + "provisionerParameters": "ProvisionerParameters holds the parameters for the provisioner that should create volumes of this storage class.", +} + +func (StorageClass) SwaggerDoc() map[string]string { + return map_StorageClass +} + +var map_StorageClassList = map[string]string{ + "": "StorageClassList is a collection of storage classes.", + "metadata": "Standard list metadata More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", + "items": "Items is the list of StorageClasses", +} + +func (StorageClassList) SwaggerDoc() map[string]string { + return map_StorageClassList +} + var map_SubresourceReference = map[string]string{ "": "SubresourceReference contains enough information to let you inspect or modify the referred subresource.", "kind": "Kind of the referent; More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds", diff --git a/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/extensions_client.go b/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/extensions_client.go index 5454ea783ed28..7ad44194f90f8 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/extensions_client.go +++ b/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/extensions_client.go @@ -30,6 +30,7 @@ type ExtensionsInterface interface { PodSecurityPoliciesGetter ReplicaSetsGetter ScalesGetter + StorageClassesGetter ThirdPartyResourcesGetter } @@ -62,6 +63,10 @@ func (c *ExtensionsClient) Scales(namespace string) ScaleInterface { return newScales(c, namespace) } +func (c *ExtensionsClient) StorageClasses() StorageClassInterface { + return newStorageClasses(c) +} + func (c *ExtensionsClient) ThirdPartyResources() ThirdPartyResourceInterface { return 
newThirdPartyResources(c) } diff --git a/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/fake/fake_extensions_client.go b/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/fake/fake_extensions_client.go index 809e80ed8af9a..cb3bc074d7343 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/fake/fake_extensions_client.go +++ b/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/fake/fake_extensions_client.go @@ -50,6 +50,10 @@ func (c *FakeExtensions) Scales(namespace string) unversioned.ScaleInterface { return &FakeScales{c, namespace} } +func (c *FakeExtensions) StorageClasses() unversioned.StorageClassInterface { + return &FakeStorageClasses{c} +} + func (c *FakeExtensions) ThirdPartyResources() unversioned.ThirdPartyResourceInterface { return &FakeThirdPartyResources{c} } diff --git a/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/fake/fake_storageclass.go b/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/fake/fake_storageclass.go new file mode 100644 index 0000000000000..bc9b856b03a45 --- /dev/null +++ b/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/fake/fake_storageclass.go @@ -0,0 +1,109 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package fake + +import ( + api "k8s.io/kubernetes/pkg/api" + unversioned "k8s.io/kubernetes/pkg/api/unversioned" + extensions "k8s.io/kubernetes/pkg/apis/extensions" + core "k8s.io/kubernetes/pkg/client/testing/core" + labels "k8s.io/kubernetes/pkg/labels" + watch "k8s.io/kubernetes/pkg/watch" +) + +// FakeStorageClasses implements StorageClassInterface +type FakeStorageClasses struct { + Fake *FakeExtensions +} + +var storageclassesResource = unversioned.GroupVersionResource{Group: "extensions", Version: "", Resource: "storageclasses"} + +func (c *FakeStorageClasses) Create(storageClass *extensions.StorageClass) (result *extensions.StorageClass, err error) { + obj, err := c.Fake. + Invokes(core.NewRootCreateAction(storageclassesResource, storageClass), &extensions.StorageClass{}) + if obj == nil { + return nil, err + } + return obj.(*extensions.StorageClass), err +} + +func (c *FakeStorageClasses) Update(storageClass *extensions.StorageClass) (result *extensions.StorageClass, err error) { + obj, err := c.Fake. + Invokes(core.NewRootUpdateAction(storageclassesResource, storageClass), &extensions.StorageClass{}) + if obj == nil { + return nil, err + } + return obj.(*extensions.StorageClass), err +} + +func (c *FakeStorageClasses) Delete(name string, options *api.DeleteOptions) error { + _, err := c.Fake. + Invokes(core.NewRootDeleteAction(storageclassesResource, name), &extensions.StorageClass{}) + return err +} + +func (c *FakeStorageClasses) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + action := core.NewRootDeleteCollectionAction(storageclassesResource, listOptions) + + _, err := c.Fake.Invokes(action, &extensions.StorageClassList{}) + return err +} + +func (c *FakeStorageClasses) Get(name string) (result *extensions.StorageClass, err error) { + obj, err := c.Fake. 
+ Invokes(core.NewRootGetAction(storageclassesResource, name), &extensions.StorageClass{}) + if obj == nil { + return nil, err + } + return obj.(*extensions.StorageClass), err +} + +func (c *FakeStorageClasses) List(opts api.ListOptions) (result *extensions.StorageClassList, err error) { + obj, err := c.Fake. + Invokes(core.NewRootListAction(storageclassesResource, opts), &extensions.StorageClassList{}) + if obj == nil { + return nil, err + } + + label := opts.LabelSelector + if label == nil { + label = labels.Everything() + } + list := &extensions.StorageClassList{} + for _, item := range obj.(*extensions.StorageClassList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested storageClasses. +func (c *FakeStorageClasses) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(core.NewRootWatchAction(storageclassesResource, opts)) +} + +// Patch applies the patch and returns the patched storageClass. +func (c *FakeStorageClasses) Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *extensions.StorageClass, err error) { + obj, err := c.Fake. + Invokes(core.NewRootPatchSubresourceAction(storageclassesResource, name, data, subresources...), &extensions.StorageClass{}) + if obj == nil { + return nil, err + } + return obj.(*extensions.StorageClass), err +} diff --git a/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/storageclass.go b/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/storageclass.go new file mode 100644 index 0000000000000..0a145b0fde86e --- /dev/null +++ b/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/storageclass.go @@ -0,0 +1,141 @@ +/* +Copyright 2016 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unversioned + +import ( + api "k8s.io/kubernetes/pkg/api" + extensions "k8s.io/kubernetes/pkg/apis/extensions" + watch "k8s.io/kubernetes/pkg/watch" +) + +// StorageClassesGetter has a method to return a StorageClassInterface. +// A group's client should implement this interface. +type StorageClassesGetter interface { + StorageClasses() StorageClassInterface +} + +// StorageClassInterface has methods to work with StorageClass resources. +type StorageClassInterface interface { + Create(*extensions.StorageClass) (*extensions.StorageClass, error) + Update(*extensions.StorageClass) (*extensions.StorageClass, error) + Delete(name string, options *api.DeleteOptions) error + DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error + Get(name string) (*extensions.StorageClass, error) + List(opts api.ListOptions) (*extensions.StorageClassList, error) + Watch(opts api.ListOptions) (watch.Interface, error) + Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *extensions.StorageClass, err error) + StorageClassExpansion +} + +// storageClasses implements StorageClassInterface +type storageClasses struct { + client *ExtensionsClient +} + +// newStorageClasses returns a StorageClasses +func newStorageClasses(c *ExtensionsClient) *storageClasses { + return &storageClasses{ + client: c, + } +} + +// Create takes the representation of a storageClass and creates it. 
Returns the server's representation of the storageClass, and an error, if there is any. +func (c *storageClasses) Create(storageClass *extensions.StorageClass) (result *extensions.StorageClass, err error) { + result = &extensions.StorageClass{} + err = c.client.Post(). + Resource("storageclasses"). + Body(storageClass). + Do(). + Into(result) + return +} + +// Update takes the representation of a storageClass and updates it. Returns the server's representation of the storageClass, and an error, if there is any. +func (c *storageClasses) Update(storageClass *extensions.StorageClass) (result *extensions.StorageClass, err error) { + result = &extensions.StorageClass{} + err = c.client.Put(). + Resource("storageclasses"). + Name(storageClass.Name). + Body(storageClass). + Do(). + Into(result) + return +} + +// Delete takes name of the storageClass and deletes it. Returns an error if one occurs. +func (c *storageClasses) Delete(name string, options *api.DeleteOptions) error { + return c.client.Delete(). + Resource("storageclasses"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *storageClasses) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + return c.client.Delete(). + Resource("storageclasses"). + VersionedParams(&listOptions, api.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the storageClass, and returns the corresponding storageClass object, and an error if there is any. +func (c *storageClasses) Get(name string) (result *extensions.StorageClass, err error) { + result = &extensions.StorageClass{} + err = c.client.Get(). + Resource("storageclasses"). + Name(name). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of StorageClasses that match those selectors. 
+func (c *storageClasses) List(opts api.ListOptions) (result *extensions.StorageClassList, err error) { + result = &extensions.StorageClassList{} + err = c.client.Get(). + Resource("storageclasses"). + VersionedParams(&opts, api.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested storageClasses. +func (c *storageClasses) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.client.Get(). + Prefix("watch"). + Resource("storageclasses"). + VersionedParams(&opts, api.ParameterCodec). + Watch() +} + +// Patch applies the patch and returns the patched storageClass. +func (c *storageClasses) Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *extensions.StorageClass, err error) { + result = &extensions.StorageClass{} + err = c.client.Patch(pt). + Resource("storageclasses"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/pkg/client/clientset_generated/release_1_4/typed/extensions/v1beta1/extensions_client.go b/pkg/client/clientset_generated/release_1_4/typed/extensions/v1beta1/extensions_client.go index b21a305779e9c..aaff6f5cc93d5 100644 --- a/pkg/client/clientset_generated/release_1_4/typed/extensions/v1beta1/extensions_client.go +++ b/pkg/client/clientset_generated/release_1_4/typed/extensions/v1beta1/extensions_client.go @@ -33,6 +33,7 @@ type ExtensionsInterface interface { PodSecurityPoliciesGetter ReplicaSetsGetter ScalesGetter + StorageClassesGetter ThirdPartyResourcesGetter } @@ -73,6 +74,10 @@ func (c *ExtensionsClient) Scales(namespace string) ScaleInterface { return newScales(c, namespace) } +func (c *ExtensionsClient) StorageClasses() StorageClassInterface { + return newStorageClasses(c) +} + func (c *ExtensionsClient) ThirdPartyResources() ThirdPartyResourceInterface { return newThirdPartyResources(c) } diff --git 
a/pkg/client/clientset_generated/release_1_4/typed/extensions/v1beta1/fake/fake_extensions_client.go b/pkg/client/clientset_generated/release_1_4/typed/extensions/v1beta1/fake/fake_extensions_client.go index 2c5f30c30b6ab..da014f724dd74 100644 --- a/pkg/client/clientset_generated/release_1_4/typed/extensions/v1beta1/fake/fake_extensions_client.go +++ b/pkg/client/clientset_generated/release_1_4/typed/extensions/v1beta1/fake/fake_extensions_client.go @@ -58,6 +58,10 @@ func (c *FakeExtensions) Scales(namespace string) v1beta1.ScaleInterface { return &FakeScales{c, namespace} } +func (c *FakeExtensions) StorageClasses() v1beta1.StorageClassInterface { + return &FakeStorageClasses{c} +} + func (c *FakeExtensions) ThirdPartyResources() v1beta1.ThirdPartyResourceInterface { return &FakeThirdPartyResources{c} } diff --git a/pkg/client/clientset_generated/release_1_4/typed/extensions/v1beta1/fake/fake_storageclass.go b/pkg/client/clientset_generated/release_1_4/typed/extensions/v1beta1/fake/fake_storageclass.go new file mode 100644 index 0000000000000..e565d2fecfd5f --- /dev/null +++ b/pkg/client/clientset_generated/release_1_4/typed/extensions/v1beta1/fake/fake_storageclass.go @@ -0,0 +1,109 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package fake + +import ( + api "k8s.io/kubernetes/pkg/api" + unversioned "k8s.io/kubernetes/pkg/api/unversioned" + v1beta1 "k8s.io/kubernetes/pkg/apis/extensions/v1beta1" + core "k8s.io/kubernetes/pkg/client/testing/core" + labels "k8s.io/kubernetes/pkg/labels" + watch "k8s.io/kubernetes/pkg/watch" +) + +// FakeStorageClasses implements StorageClassInterface +type FakeStorageClasses struct { + Fake *FakeExtensions +} + +var storageclassesResource = unversioned.GroupVersionResource{Group: "extensions", Version: "v1beta1", Resource: "storageclasses"} + +func (c *FakeStorageClasses) Create(storageClass *v1beta1.StorageClass) (result *v1beta1.StorageClass, err error) { + obj, err := c.Fake. + Invokes(core.NewRootCreateAction(storageclassesResource, storageClass), &v1beta1.StorageClass{}) + if obj == nil { + return nil, err + } + return obj.(*v1beta1.StorageClass), err +} + +func (c *FakeStorageClasses) Update(storageClass *v1beta1.StorageClass) (result *v1beta1.StorageClass, err error) { + obj, err := c.Fake. + Invokes(core.NewRootUpdateAction(storageclassesResource, storageClass), &v1beta1.StorageClass{}) + if obj == nil { + return nil, err + } + return obj.(*v1beta1.StorageClass), err +} + +func (c *FakeStorageClasses) Delete(name string, options *api.DeleteOptions) error { + _, err := c.Fake. + Invokes(core.NewRootDeleteAction(storageclassesResource, name), &v1beta1.StorageClass{}) + return err +} + +func (c *FakeStorageClasses) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + action := core.NewRootDeleteCollectionAction(storageclassesResource, listOptions) + + _, err := c.Fake.Invokes(action, &v1beta1.StorageClassList{}) + return err +} + +func (c *FakeStorageClasses) Get(name string) (result *v1beta1.StorageClass, err error) { + obj, err := c.Fake. 
+ Invokes(core.NewRootGetAction(storageclassesResource, name), &v1beta1.StorageClass{}) + if obj == nil { + return nil, err + } + return obj.(*v1beta1.StorageClass), err +} + +func (c *FakeStorageClasses) List(opts api.ListOptions) (result *v1beta1.StorageClassList, err error) { + obj, err := c.Fake. + Invokes(core.NewRootListAction(storageclassesResource, opts), &v1beta1.StorageClassList{}) + if obj == nil { + return nil, err + } + + label := opts.LabelSelector + if label == nil { + label = labels.Everything() + } + list := &v1beta1.StorageClassList{} + for _, item := range obj.(*v1beta1.StorageClassList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested storageClasses. +func (c *FakeStorageClasses) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(core.NewRootWatchAction(storageclassesResource, opts)) +} + +// Patch applies the patch and returns the patched storageClass. +func (c *FakeStorageClasses) Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *v1beta1.StorageClass, err error) { + obj, err := c.Fake. 
+ Invokes(core.NewRootPatchSubresourceAction(storageclassesResource, name, data, subresources...), &v1beta1.StorageClass{}) + if obj == nil { + return nil, err + } + return obj.(*v1beta1.StorageClass), err +} diff --git a/pkg/client/clientset_generated/release_1_4/typed/extensions/v1beta1/generated_expansion.go b/pkg/client/clientset_generated/release_1_4/typed/extensions/v1beta1/generated_expansion.go index 94f296b44ff13..4fde737d5e35a 100644 --- a/pkg/client/clientset_generated/release_1_4/typed/extensions/v1beta1/generated_expansion.go +++ b/pkg/client/clientset_generated/release_1_4/typed/extensions/v1beta1/generated_expansion.go @@ -33,3 +33,5 @@ type ReplicaSetExpansion interface{} type ScaleExpansion interface{} type ThirdPartyResourceExpansion interface{} + +type StorageClassExpansion interface{} diff --git a/pkg/client/clientset_generated/release_1_4/typed/extensions/v1beta1/storageclass.go b/pkg/client/clientset_generated/release_1_4/typed/extensions/v1beta1/storageclass.go new file mode 100644 index 0000000000000..803fe9dadd263 --- /dev/null +++ b/pkg/client/clientset_generated/release_1_4/typed/extensions/v1beta1/storageclass.go @@ -0,0 +1,141 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + api "k8s.io/kubernetes/pkg/api" + v1beta1 "k8s.io/kubernetes/pkg/apis/extensions/v1beta1" + watch "k8s.io/kubernetes/pkg/watch" +) + +// StorageClassesGetter has a method to return a StorageClassInterface. 
+// A group's client should implement this interface. +type StorageClassesGetter interface { + StorageClasses() StorageClassInterface +} + +// StorageClassInterface has methods to work with StorageClass resources. +type StorageClassInterface interface { + Create(*v1beta1.StorageClass) (*v1beta1.StorageClass, error) + Update(*v1beta1.StorageClass) (*v1beta1.StorageClass, error) + Delete(name string, options *api.DeleteOptions) error + DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error + Get(name string) (*v1beta1.StorageClass, error) + List(opts api.ListOptions) (*v1beta1.StorageClassList, error) + Watch(opts api.ListOptions) (watch.Interface, error) + Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *v1beta1.StorageClass, err error) + StorageClassExpansion +} + +// storageClasses implements StorageClassInterface +type storageClasses struct { + client *ExtensionsClient +} + +// newStorageClasses returns a StorageClasses +func newStorageClasses(c *ExtensionsClient) *storageClasses { + return &storageClasses{ + client: c, + } +} + +// Create takes the representation of a storageClass and creates it. Returns the server's representation of the storageClass, and an error, if there is any. +func (c *storageClasses) Create(storageClass *v1beta1.StorageClass) (result *v1beta1.StorageClass, err error) { + result = &v1beta1.StorageClass{} + err = c.client.Post(). + Resource("storageclasses"). + Body(storageClass). + Do(). + Into(result) + return +} + +// Update takes the representation of a storageClass and updates it. Returns the server's representation of the storageClass, and an error, if there is any. +func (c *storageClasses) Update(storageClass *v1beta1.StorageClass) (result *v1beta1.StorageClass, err error) { + result = &v1beta1.StorageClass{} + err = c.client.Put(). + Resource("storageclasses"). + Name(storageClass.Name). + Body(storageClass). + Do(). 
+ Into(result) + return +} + +// Delete takes name of the storageClass and deletes it. Returns an error if one occurs. +func (c *storageClasses) Delete(name string, options *api.DeleteOptions) error { + return c.client.Delete(). + Resource("storageclasses"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *storageClasses) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + return c.client.Delete(). + Resource("storageclasses"). + VersionedParams(&listOptions, api.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the storageClass, and returns the corresponding storageClass object, and an error if there is any. +func (c *storageClasses) Get(name string) (result *v1beta1.StorageClass, err error) { + result = &v1beta1.StorageClass{} + err = c.client.Get(). + Resource("storageclasses"). + Name(name). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of StorageClasses that match those selectors. +func (c *storageClasses) List(opts api.ListOptions) (result *v1beta1.StorageClassList, err error) { + result = &v1beta1.StorageClassList{} + err = c.client.Get(). + Resource("storageclasses"). + VersionedParams(&opts, api.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested storageClasses. +func (c *storageClasses) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.client.Get(). + Prefix("watch"). + Resource("storageclasses"). + VersionedParams(&opts, api.ParameterCodec). + Watch() +} + +// Patch applies the patch and returns the patched storageClass. +func (c *storageClasses) Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *v1beta1.StorageClass, err error) { + result = &v1beta1.StorageClass{} + err = c.client.Patch(pt). + Resource("storageclasses"). + SubResource(subresources...). 
+ Name(name). + Body(data). + Do(). + Into(result) + return +} From 2be2154b9c9bba20903644fe82b7bc7c33f431b7 Mon Sep 17 00:00:00 2001 From: Jan Safranek Date: Fri, 15 Jul 2016 14:56:04 +0200 Subject: [PATCH 03/11] Add new class annotation interpretation - when selecting existing PVs, PV class must match PVC class - provision a new volume only when there is no existing and matching PV (unit test 11-6 changed because of this) - during provisioning, copy class from PVC to PV (even if it is empty) --- .../volume/persistentvolume/controller.go | 12 +- .../persistentvolume/controller_base.go | 22 ++++ .../volume/persistentvolume/framework_test.go | 16 ++- .../volume/persistentvolume/index.go | 19 ++- .../volume/persistentvolume/index_test.go | 108 ++++++++++++++++++ .../volume/persistentvolume/provision_test.go | 23 ++-- 6 files changed, 168 insertions(+), 32 deletions(-) diff --git a/pkg/controller/volume/persistentvolume/controller.go b/pkg/controller/volume/persistentvolume/controller.go index abbd2a9cc53b9..2b5e8d8171fa1 100644 --- a/pkg/controller/volume/persistentvolume/controller.go +++ b/pkg/controller/volume/persistentvolume/controller.go @@ -72,9 +72,14 @@ const annBindCompleted = "pv.kubernetes.io/bind-completed" // pre-bound). Value of this annotation does not matter. const annBoundByController = "pv.kubernetes.io/bound-by-controller" -// annClass annotation represents a new field which instructs dynamic -// provisioning to choose a particular storage class (aka profile). -// Value of this annotation should be empty. +// annClass annotation represents a new field: +// - in PersistentVolumeClaim it represents required class to match. +// Only PersistentVolumes with the same class (i.e. annotation with the same +// value) can be bound to the claim. In case no such volume exists, the +// controller will provision a new one using StorageClass instance with +// the same name as the annotation value. 
+// - in PersistentVolume it represents storage class to which the persistent +// volume belongs. const annClass = "volume.alpha.kubernetes.io/storage-class" // This annotation is added to a PV that has been dynamically provisioned by @@ -1149,6 +1154,7 @@ func (ctrl *PersistentVolumeController) provisionClaimOperation(claimObj interfa // Add annBoundByController (used in deleting the volume) setAnnotation(&volume.ObjectMeta, annBoundByController, "yes") setAnnotation(&volume.ObjectMeta, annDynamicallyProvisioned, plugin.GetPluginName()) + setAnnotation(&volume.ObjectMeta, annClass, getClaimClass(claim)) // Try to create the PV object several times for i := 0; i < ctrl.createProvisionedPVRetryCount; i++ { diff --git a/pkg/controller/volume/persistentvolume/controller_base.go b/pkg/controller/volume/persistentvolume/controller_base.go index 88b5b139ac107..ff39034a82801 100644 --- a/pkg/controller/volume/persistentvolume/controller_base.go +++ b/pkg/controller/volume/persistentvolume/controller_base.go @@ -578,3 +578,25 @@ func storeObjectUpdate(store cache.Store, obj interface{}, className string) (bo } return true, nil } + +// getVolumeClass returns value of annClass annotation or empty string in case +// the annotation does not exist. +// TODO: change to PersistentVolume.Spec.Class value when this attribute is +// introduced. +func getVolumeClass(volume *api.PersistentVolume) string { + if class, found := volume.Annotations[annClass]; found { + return class + } + return "" +} + +// getClaimClass returns value of annClass annotation or empty string in case +// the annotation does not exist. +// TODO: change to PersistentVolumeClaim.Spec.Class value when this attribute is +// introduced. 
+func getClaimClass(claim *api.PersistentVolumeClaim) string { + if class, found := claim.Annotations[annClass]; found { + return class + } + return "" +} diff --git a/pkg/controller/volume/persistentvolume/framework_test.go b/pkg/controller/volume/persistentvolume/framework_test.go index cd78384daa17c..eeef10124e8d6 100644 --- a/pkg/controller/volume/persistentvolume/framework_test.go +++ b/pkg/controller/volume/persistentvolume/framework_test.go @@ -636,10 +636,13 @@ func newVolume(name, capacity, boundToClaimUID, boundToClaimName string, phase a if len(annotations) > 0 { volume.Annotations = make(map[string]string) for _, a := range annotations { - if a != annDynamicallyProvisioned { - volume.Annotations[a] = "yes" - } else { + switch a { + case annDynamicallyProvisioned: volume.Annotations[a] = mockPluginName + case annClass: + volume.Annotations[a] = "gold" + default: + volume.Annotations[a] = "yes" } } } @@ -710,7 +713,12 @@ func newClaim(name, claimUID, capacity, boundToVolume string, phase api.Persiste if len(annotations) > 0 { claim.Annotations = make(map[string]string) for _, a := range annotations { - claim.Annotations[a] = "yes" + switch a { + case annClass: + claim.Annotations[a] = "gold" + default: + claim.Annotations[a] = "yes" + } } } return &claim diff --git a/pkg/controller/volume/persistentvolume/index.go b/pkg/controller/volume/persistentvolume/index.go index 60699e6e653e1..852a110887d23 100644 --- a/pkg/controller/volume/persistentvolume/index.go +++ b/pkg/controller/volume/persistentvolume/index.go @@ -20,6 +20,8 @@ import ( "fmt" "sort" + "github.com/golang/glog" + "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/unversioned" "k8s.io/kubernetes/pkg/client/cache" @@ -126,11 +128,17 @@ func (pvIndex *persistentVolumeOrderedIndex) findByClaim(claim *api.PersistentVo // filter out: // - volumes bound to another claim // - volumes whose labels don't match the claim's selector, if specified + // - volumes in Class that is not requested if 
volume.Spec.ClaimRef != nil { continue } else if selector != nil && !selector.Matches(labels.Set(volume.Labels)) { continue } + claimClass := getClaimClass(claim) + glog.V(1).Infof("JSAF: inspecting %s: wants %q, got %q", volume.Name, claimClass, getVolumeClass(volume)) + if claimClass != "" && claimClass != getVolumeClass(volume) { + continue + } volumeQty := volume.Spec.Capacity[api.ResourceStorage] volumeSize := volumeQty.Value() @@ -142,17 +150,6 @@ func (pvIndex *persistentVolumeOrderedIndex) findByClaim(claim *api.PersistentVo } } - // We want to provision volumes if the annotation is set even if there - // is matching PV. Therefore, do not look for available PV and let - // a new volume to be provisioned. - // - // When provisioner creates a new PV to this claim, an exact match - // pre-bound to the claim will be found by the checks above during - // subsequent claim sync. - if hasAnnotation(claim.ObjectMeta, annClass) { - return nil, nil - } - if smallestVolume != nil { // Found a matching volume return smallestVolume, nil diff --git a/pkg/controller/volume/persistentvolume/index_test.go b/pkg/controller/volume/persistentvolume/index_test.go index c055c3fa3cb23..f2365e10288e3 100644 --- a/pkg/controller/volume/persistentvolume/index_test.go +++ b/pkg/controller/volume/persistentvolume/index_test.go @@ -170,6 +170,51 @@ func TestMatchVolume(t *testing.T) { }, }, }, + "successful-match-with-class": { + expectedMatch: "gce-pd-silver1", + claim: &api.PersistentVolumeClaim{ + ObjectMeta: api.ObjectMeta{ + Name: "claim01", + Namespace: "myns", + Annotations: map[string]string{ + annClass: "silver", + }, + }, + Spec: api.PersistentVolumeClaimSpec{ + AccessModes: []api.PersistentVolumeAccessMode{api.ReadWriteOnce}, + Selector: &unversioned.LabelSelector{ + MatchLabels: map[string]string{ + "should-exist": "true", + }, + }, + Resources: api.ResourceRequirements{ + Requests: api.ResourceList{ + api.ResourceName(api.ResourceStorage): resource.MustParse("1G"), + }, + 
}, + }, + }, + }, + "successful-match-with-class-and-labels": { + expectedMatch: "gce-pd-silver2", + claim: &api.PersistentVolumeClaim{ + ObjectMeta: api.ObjectMeta{ + Name: "claim01", + Namespace: "myns", + Annotations: map[string]string{ + annClass: "silver", + }, + }, + Spec: api.PersistentVolumeClaimSpec{ + AccessModes: []api.PersistentVolumeAccessMode{api.ReadWriteOnce}, + Resources: api.ResourceRequirements{ + Requests: api.ResourceList{ + api.ResourceName(api.ResourceStorage): resource.MustParse("1G"), + }, + }, + }, + }, + }, } for name, scenario := range scenarios { @@ -573,6 +618,69 @@ func createTestVolumes() []*api.PersistentVolume { }, }, }, + { + ObjectMeta: api.ObjectMeta{ + UID: "gce-pd-silver1", + Name: "gce0023", + Labels: map[string]string{ + "should-exist": "true", + }, + Annotations: map[string]string{ + annClass: "silver", + }, + }, + Spec: api.PersistentVolumeSpec{ + Capacity: api.ResourceList{ + api.ResourceName(api.ResourceStorage): resource.MustParse("10000G"), + }, + PersistentVolumeSource: api.PersistentVolumeSource{ + GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{}, + }, + AccessModes: []api.PersistentVolumeAccessMode{ + api.ReadWriteOnce, + }, + }, + }, + { + ObjectMeta: api.ObjectMeta{ + UID: "gce-pd-silver2", + Name: "gce0024", + Annotations: map[string]string{ + annClass: "silver", + }, + }, + Spec: api.PersistentVolumeSpec{ + Capacity: api.ResourceList{ + api.ResourceName(api.ResourceStorage): resource.MustParse("100G"), + }, + PersistentVolumeSource: api.PersistentVolumeSource{ + GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{}, + }, + AccessModes: []api.PersistentVolumeAccessMode{ + api.ReadWriteOnce, + }, + }, + }, + { + ObjectMeta: api.ObjectMeta{ + UID: "gce-pd-gold", + Name: "gce0025", + Annotations: map[string]string{ + annClass: "gold", + }, + }, + Spec: api.PersistentVolumeSpec{ + Capacity: api.ResourceList{ + api.ResourceName(api.ResourceStorage): resource.MustParse("50G"), + }, + PersistentVolumeSource: 
api.PersistentVolumeSource{ + GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{}, + }, + AccessModes: []api.PersistentVolumeAccessMode{ + api.ReadWriteOnce, + }, + }, + }, } } diff --git a/pkg/controller/volume/persistentvolume/provision_test.go b/pkg/controller/volume/persistentvolume/provision_test.go index 51f6fb5c941b1..e9a8d0d8f54e2 100644 --- a/pkg/controller/volume/persistentvolume/provision_test.go +++ b/pkg/controller/volume/persistentvolume/provision_test.go @@ -33,7 +33,7 @@ func TestProvisionSync(t *testing.T) { // Provision a volume "11-1 - successful provision", novolumes, - newVolumeArray("pvc-uid11-1", "1Gi", "uid11-1", "claim11-1", api.VolumeBound, api.PersistentVolumeReclaimDelete, annBoundByController, annDynamicallyProvisioned), + newVolumeArray("pvc-uid11-1", "1Gi", "uid11-1", "claim11-1", api.VolumeBound, api.PersistentVolumeReclaimDelete, annBoundByController, annDynamicallyProvisioned, annClass), newClaimArray("claim11-1", "uid11-1", "1Gi", "", api.ClaimPending, annClass), // Binding will be completed in the next syncClaim newClaimArray("claim11-1", "uid11-1", "1Gi", "", api.ClaimPending, annClass), @@ -70,17 +70,12 @@ func TestProvisionSync(t *testing.T) { wrapTestWithControllerConfig(operationProvision, []error{errors.New("Moc provisioner error")}, testSyncClaim), }, { - // Provision success - there is already a volume available, still - // we provision a new one when requested. 
+ // No provisioning if there is a matching volume available "11-6 - provisioning when there is a volume available", - newVolumeArray("volume11-6", "1Gi", "", "", api.VolumePending, api.PersistentVolumeReclaimRetain), - []*api.PersistentVolume{ - newVolume("volume11-6", "1Gi", "", "", api.VolumePending, api.PersistentVolumeReclaimRetain), - newVolume("pvc-uid11-6", "1Gi", "uid11-6", "claim11-6", api.VolumeBound, api.PersistentVolumeReclaimDelete, annBoundByController, annDynamicallyProvisioned), - }, - newClaimArray("claim11-6", "uid11-6", "1Gi", "", api.ClaimPending, annClass), - // Binding will be completed in the next syncClaim + newVolumeArray("volume11-6", "1Gi", "", "", api.VolumePending, api.PersistentVolumeReclaimRetain, annClass), + newVolumeArray("volume11-6", "1Gi", "uid11-6", "claim11-6", api.VolumeBound, api.PersistentVolumeReclaimRetain, annBoundByController, annClass), newClaimArray("claim11-6", "uid11-6", "1Gi", "", api.ClaimPending, annClass), + newClaimArray("claim11-6", "uid11-6", "1Gi", "volume11-6", api.ClaimBound, annClass, annBoundByController, annBindCompleted), noevents, noerrors, // No provisioning plugin confingure - makes the test fail when // the controller errorneously tries to provision something @@ -91,7 +86,7 @@ func TestProvisionSync(t *testing.T) { // a volume. 
"11-7 - claim is bound before provisioning", novolumes, - newVolumeArray("pvc-uid11-7", "1Gi", "uid11-7", "claim11-7", api.VolumeBound, api.PersistentVolumeReclaimDelete, annBoundByController, annDynamicallyProvisioned), + newVolumeArray("pvc-uid11-7", "1Gi", "uid11-7", "claim11-7", api.VolumeBound, api.PersistentVolumeReclaimDelete, annBoundByController, annDynamicallyProvisioned, annClass), newClaimArray("claim11-7", "uid11-7", "1Gi", "", api.ClaimPending, annClass), // The claim would be bound in next syncClaim newClaimArray("claim11-7", "uid11-7", "1Gi", "", api.ClaimPending, annClass), @@ -100,7 +95,7 @@ func TestProvisionSync(t *testing.T) { // Create a volume before provisionClaimOperation starts. // This similates a parallel controller provisioning the volume. reactor.lock.Lock() - volume := newVolume("pvc-uid11-7", "1Gi", "uid11-7", "claim11-7", api.VolumeBound, api.PersistentVolumeReclaimDelete, annBoundByController, annDynamicallyProvisioned) + volume := newVolume("pvc-uid11-7", "1Gi", "uid11-7", "claim11-7", api.VolumeBound, api.PersistentVolumeReclaimDelete, annBoundByController, annDynamicallyProvisioned, annClass) reactor.volumes[volume.Name] = volume reactor.lock.Unlock() }), @@ -110,7 +105,7 @@ func TestProvisionSync(t *testing.T) { // second retry succeeds "11-8 - cannot save provisioned volume", novolumes, - newVolumeArray("pvc-uid11-8", "1Gi", "uid11-8", "claim11-8", api.VolumeBound, api.PersistentVolumeReclaimDelete, annBoundByController, annDynamicallyProvisioned), + newVolumeArray("pvc-uid11-8", "1Gi", "uid11-8", "claim11-8", api.VolumeBound, api.PersistentVolumeReclaimDelete, annBoundByController, annDynamicallyProvisioned, annClass), newClaimArray("claim11-8", "uid11-8", "1Gi", "", api.ClaimPending, annClass), // Binding will be completed in the next syncClaim newClaimArray("claim11-8", "uid11-8", "1Gi", "", api.ClaimPending, annClass), @@ -244,7 +239,7 @@ func TestProvisionMultiSync(t *testing.T) { // Provision a volume with binding "12-1 
- successful provision", novolumes, - newVolumeArray("pvc-uid12-1", "1Gi", "uid12-1", "claim12-1", api.VolumeBound, api.PersistentVolumeReclaimDelete, annBoundByController, annDynamicallyProvisioned), + newVolumeArray("pvc-uid12-1", "1Gi", "uid12-1", "claim12-1", api.VolumeBound, api.PersistentVolumeReclaimDelete, annBoundByController, annDynamicallyProvisioned, annClass), newClaimArray("claim12-1", "uid12-1", "1Gi", "", api.ClaimPending, annClass), // Binding will be completed in the next syncClaim newClaimArray("claim12-1", "uid12-1", "1Gi", "pvc-uid12-1", api.ClaimBound, annClass, annBoundByController, annBindCompleted), From 358fb579bbdc0b9bcb7e7b70b1649a66f152f8d0 Mon Sep 17 00:00:00 2001 From: Jan Safranek Date: Fri, 15 Jul 2016 14:56:05 +0200 Subject: [PATCH 04/11] Add controller logic --- .../app/controllermanager.go | 9 +- .../controllermanager/controllermanager.go | 3 +- .../volume/persistentvolume/controller.go | 97 +++++++++++++++---- .../persistentvolume/controller_base.go | 56 ++++++++--- .../persistentvolume/controller_test.go | 2 +- .../volume/persistentvolume/framework_test.go | 7 +- .../volume/persistentvolume/provision_test.go | 2 +- pkg/volume/plugins.go | 31 +++++- .../persistent_volumes_test.go | 2 +- 9 files changed, 161 insertions(+), 48 deletions(-) diff --git a/cmd/kube-controller-manager/app/controllermanager.go b/cmd/kube-controller-manager/app/controllermanager.go index 5bc186de94123..5b5a968f9296f 100644 --- a/cmd/kube-controller-manager/app/controllermanager.go +++ b/cmd/kube-controller-manager/app/controllermanager.go @@ -390,20 +390,15 @@ func StartControllers(s *options.CMServer, kubeClient *client.Client, kubeconfig } } - provisioner, err := NewVolumeProvisioner(cloud, s.VolumeConfiguration) - if err != nil { - glog.Fatal("A Provisioner could not be created, but one was expected. Provisioning will not work. 
This functionality is considered an early Alpha version.") - } - volumeController := persistentvolumecontroller.NewPersistentVolumeController( clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "persistent-volume-binder")), s.PVClaimBinderSyncPeriod.Duration, - provisioner, ProbeRecyclableVolumePlugins(s.VolumeConfiguration), cloud, s.ClusterName, - nil, nil, nil, + nil, nil, nil, nil, s.VolumeConfiguration.EnableDynamicProvisioning, + "", ) volumeController.Run() time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter)) diff --git a/contrib/mesos/pkg/controllermanager/controllermanager.go b/contrib/mesos/pkg/controllermanager/controllermanager.go index 7adad2caf79b6..2a1264bfeecb2 100644 --- a/contrib/mesos/pkg/controllermanager/controllermanager.go +++ b/contrib/mesos/pkg/controllermanager/controllermanager.go @@ -281,14 +281,15 @@ func (s *CMServer) Run(_ []string) error { volumeController := persistentvolumecontroller.NewPersistentVolumeController( clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "persistent-volume-binder")), s.PVClaimBinderSyncPeriod.Duration, - provisioner, kubecontrollermanager.ProbeRecyclableVolumePlugins(s.VolumeConfiguration), cloud, s.ClusterName, nil, nil, nil, + nil, s.VolumeConfiguration.EnableDynamicProvisioning, + "", ) volumeController.Run() diff --git a/pkg/controller/volume/persistentvolume/controller.go b/pkg/controller/volume/persistentvolume/controller.go index 2b5e8d8171fa1..fa46d17605e7f 100644 --- a/pkg/controller/volume/persistentvolume/controller.go +++ b/pkg/controller/volume/persistentvolume/controller.go @@ -21,6 +21,7 @@ import ( "time" "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/apis/extensions" "k8s.io/kubernetes/pkg/client/cache" clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" "k8s.io/kubernetes/pkg/client/record" @@ -117,13 +118,16 @@ type PersistentVolumeController struct { claimController *framework.Controller 
claimControllerStopCh chan struct{} claimSource cache.ListerWatcher + classReflector *cache.Reflector + classReflectorStopCh chan struct{} + classSource cache.ListerWatcher kubeClient clientset.Interface eventRecorder record.EventRecorder cloud cloudprovider.Interface - recyclePluginMgr vol.VolumePluginMgr - provisioner vol.ProvisionableVolumePlugin + volumePluginMgr vol.VolumePluginMgr enableDynamicProvisioning bool clusterName string + defaultStorageClass string // Cache of the last known version of volumes and claims. This cache is // thread safe as long as the volumes/claims there are not modified, they @@ -132,6 +136,7 @@ type PersistentVolumeController struct { // it saves newer version to etcd. volumes persistentVolumeOrderedIndex claims cache.Store + classes cache.Store // Map of scheduled/running operations. runningOperations goroutinemap.GoRoutineMap @@ -877,7 +882,7 @@ func (ctrl *PersistentVolumeController) recycleVolumeOperation(arg interface{}) // Find a plugin. spec := vol.NewSpecFromPersistentVolume(volume, false) - plugin, err := ctrl.recyclePluginMgr.FindRecyclablePluginBySpec(spec) + plugin, err := ctrl.volumePluginMgr.FindRecyclablePluginBySpec(spec) if err != nil { // No recycler found. Emit an event and mark the volume Failed. if _, err = ctrl.updateVolumePhaseWithEvent(volume, api.VolumeFailed, api.EventTypeWarning, "VolumeFailedRecycle", "No recycler plugin found for the volume!"); err != nil { @@ -1031,13 +1036,32 @@ func (ctrl *PersistentVolumeController) isVolumeReleased(volume *api.PersistentV // (it will be re-used in future provisioner error cases). func (ctrl *PersistentVolumeController) doDeleteVolume(volume *api.PersistentVolume) error { glog.V(4).Infof("doDeleteVolume [%s]", volume.Name) - // Find a plugin. + var err error + + // Find a plugin. 
Try to find the same plugin that provisioned the volume + var plugin vol.DeletableVolumePlugin + if hasAnnotation(volume.ObjectMeta, annDynamicallyProvisioned) { + provisionPluginName := volume.Annotations[annDynamicallyProvisioned] + if provisionPluginName != "" { + plugin, err = ctrl.volumePluginMgr.FindDeletablePluginByName(provisionPluginName) + if err != nil { + glog.V(3).Infof("did not find a deleter plugin %q for volume %q: %v, will try to find a generic one", + provisionPluginName, volume.Name, err) + } + } + } + spec := vol.NewSpecFromPersistentVolume(volume, false) - plugin, err := ctrl.recyclePluginMgr.FindDeletablePluginBySpec(spec) - if err != nil { - // No deleter found. Emit an event and mark the volume Failed. - return fmt.Errorf("Error getting deleter volume plugin for volume %q: %v", volume.Name, err) + if plugin == nil { + // The plugin that provisioned the volume was not found or the volume + // was not dynamically provisioned. Try a generic plugin. + plugin, err = ctrl.volumePluginMgr.FindDeletablePluginBySpec(spec) + if err != nil { + // No deleter found. Emit an event and mark the volume Failed. + return fmt.Errorf("Error getting deleter volume plugin for volume %q: %v", volume.Name, err) + } } + glog.V(5).Infof("found a deleter plugin %q for volume %q", plugin.GetPluginName(), volume.Name) // Plugin found deleter, err := plugin.NewDeleter(spec) @@ -1099,12 +1123,10 @@ func (ctrl *PersistentVolumeController) provisionClaimOperation(claimObj interfa return } - // TODO: find provisionable plugin based on a class/profile - plugin := ctrl.provisioner - if plugin == nil { - // No provisioner found. Emit an event. 
- ctrl.eventRecorder.Event(claim, api.EventTypeWarning, "ProvisioningFailed", "No provisioner plugin found for the claim!") - glog.V(2).Infof("no provisioner plugin found for claim %s!", claimToClaimKey(claim)) + plugin, storageClass, err := ctrl.findProvisionablePlugin(claim) + if err != nil { + ctrl.eventRecorder.Event(claim, api.EventTypeWarning, "ProvisioningFailed", err.Error()) + glog.V(2).Infof("error finding provisioning plugin for claim %s: %v", claimToClaimKey(claim), err) // The controller will retry provisioning the volume in every // syncVolume() call. return @@ -1124,21 +1146,23 @@ func (ctrl *PersistentVolumeController) provisionClaimOperation(claimObj interfa ClusterName: ctrl.clusterName, PVName: pvName, PVCName: claim.Name, + ProvisionerParameters: storageClass.ProvisionerParameters, + ProvisionerSelector: claim.Spec.Selector, } // Provision the volume provisioner, err := plugin.NewProvisioner(options) if err != nil { strerr := fmt.Sprintf("Failed to create provisioner: %v", err) - glog.V(2).Infof("failed to create provisioner for claim %q: %v", claimToClaimKey(claim), err) + glog.V(2).Infof("failed to create provisioner for claim %q with StorageClass %q: %v", claimToClaimKey(claim), storageClass.Name, err) ctrl.eventRecorder.Event(claim, api.EventTypeWarning, "ProvisioningFailed", strerr) return } volume, err = provisioner.Provision() if err != nil { - strerr := fmt.Sprintf("Failed to provision volume: %v", err) - glog.V(2).Infof("failed to provision volume for claim %q: %v", claimToClaimKey(claim), err) + strerr := fmt.Sprintf("Failed to provision volume with StorageClass %q: %v", storageClass.Name, err) + glog.V(2).Infof("failed to provision volume for claim %q with StorageClass %q: %v", claimToClaimKey(claim), storageClass.Name, err) ctrl.eventRecorder.Event(claim, api.EventTypeWarning, "ProvisioningFailed", strerr) return } @@ -1151,6 +1175,8 @@ func (ctrl *PersistentVolumeController) provisionClaimOperation(claimObj interfa 
volume.Spec.ClaimRef = claimRef volume.Status.Phase = api.VolumeBound + // TODO: copy all labels from StorageClass to the created volume? + // Add annBoundByController (used in deleting the volume) setAnnotation(&volume.ObjectMeta, annBoundByController, "yes") setAnnotation(&volume.ObjectMeta, annDynamicallyProvisioned, plugin.GetPluginName()) @@ -1233,3 +1259,40 @@ func (ctrl *PersistentVolumeController) scheduleOperation(operationName string, } } } + +func (ctrl *PersistentVolumeController) findProvisionablePlugin(claim *api.PersistentVolumeClaim) (vol.ProvisionableVolumePlugin, *extensions.StorageClass, error) { + storageClass, err := ctrl.findStorageClass(claim) + if err != nil { + return nil, nil, err + } + + // Find a plugin for the class + plugin, err := ctrl.volumePluginMgr.FindProvisionablePluginByName(string(storageClass.ProvisionerType)) + if err != nil { + return nil, nil, err + } + return plugin, storageClass, nil +} + +func (ctrl *PersistentVolumeController) findStorageClass(claim *api.PersistentVolumeClaim) (*extensions.StorageClass, error) { + className := getClaimClass(claim) + if className == "" { + className = ctrl.defaultStorageClass + } + if className == "" { + return nil, fmt.Errorf("No default StorageClass configured") + } + + classObj, found, err := ctrl.classes.GetByKey(className) + if err != nil { + return nil, err + } + if !found { + return nil, fmt.Errorf("StorageClass %q not found", className) + } + class, ok := classObj.(*extensions.StorageClass) + if !ok { + return nil, fmt.Errorf("Cannot convert object to StorageClass: %+v", classObj) + } + return class, nil +} diff --git a/pkg/controller/volume/persistentvolume/controller_base.go b/pkg/controller/volume/persistentvolume/controller_base.go index ff39034a82801..a6442c2cef535 100644 --- a/pkg/controller/volume/persistentvolume/controller_base.go +++ b/pkg/controller/volume/persistentvolume/controller_base.go @@ -24,6 +24,7 @@ import ( "k8s.io/kubernetes/pkg/api" 
"k8s.io/kubernetes/pkg/api/errors" "k8s.io/kubernetes/pkg/api/meta" + "k8s.io/kubernetes/pkg/apis/extensions" "k8s.io/kubernetes/pkg/client/cache" clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" unversioned_core "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned" @@ -47,13 +48,13 @@ import ( func NewPersistentVolumeController( kubeClient clientset.Interface, syncPeriod time.Duration, - provisioner vol.ProvisionableVolumePlugin, - recyclers []vol.VolumePlugin, + volumePlugins []vol.VolumePlugin, cloud cloudprovider.Interface, clusterName string, - volumeSource, claimSource cache.ListerWatcher, + volumeSource, claimSource, classSource cache.ListerWatcher, eventRecorder record.EventRecorder, enableDynamicProvisioning bool, + defaultStorageClass string, ) *PersistentVolumeController { if eventRecorder == nil { @@ -63,25 +64,20 @@ func NewPersistentVolumeController( } controller := &PersistentVolumeController{ - volumes: newPersistentVolumeOrderedIndex(), - claims: cache.NewStore(framework.DeletionHandlingMetaNamespaceKeyFunc), - kubeClient: kubeClient, - eventRecorder: eventRecorder, - runningOperations: goroutinemap.NewGoRoutineMap(false /* exponentialBackOffOnError */), - cloud: cloud, - provisioner: provisioner, + volumes: newPersistentVolumeOrderedIndex(), + claims: cache.NewStore(framework.DeletionHandlingMetaNamespaceKeyFunc), + kubeClient: kubeClient, + eventRecorder: eventRecorder, + runningOperations: goroutinemap.NewGoRoutineMap(false /* exponentialBackOffOnError */), + cloud: cloud, enableDynamicProvisioning: enableDynamicProvisioning, clusterName: clusterName, createProvisionedPVRetryCount: createProvisionedPVRetryCount, createProvisionedPVInterval: createProvisionedPVInterval, + defaultStorageClass: defaultStorageClass, } - controller.recyclePluginMgr.InitPlugins(recyclers, controller) - if controller.provisioner != nil { - if err := controller.provisioner.Init(controller); err != nil { 
- glog.Errorf("PersistentVolumeController: error initializing provisioner plugin: %v", err) - } - } + controller.volumePluginMgr.InitPlugins(volumePlugins, controller) if volumeSource == nil { volumeSource = &cache.ListWatch{ @@ -107,6 +103,18 @@ func NewPersistentVolumeController( } controller.claimSource = claimSource + if classSource == nil { + classSource = &cache.ListWatch{ + ListFunc: func(options api.ListOptions) (runtime.Object, error) { + return kubeClient.Extensions().StorageClasses().List(options) + }, + WatchFunc: func(options api.ListOptions) (watch.Interface, error) { + return kubeClient.Extensions().StorageClasses().Watch(options) + }, + } + } + controller.classSource = classSource + _, controller.volumeController = framework.NewIndexerInformer( volumeSource, &api.PersistentVolume{}, @@ -128,6 +136,16 @@ func NewPersistentVolumeController( DeleteFunc: controller.deleteClaim, }, ) + + // This is just a cache of StorageClass instances, no special actions are + // needed when a class is created/deleted/updated. 
+ controller.classes = cache.NewStore(framework.DeletionHandlingMetaNamespaceKeyFunc) + controller.classReflector = cache.NewReflector( + classSource, + &extensions.StorageClass{}, + controller.classes, + syncPeriod, + ) return controller } @@ -433,6 +451,11 @@ func (ctrl *PersistentVolumeController) Run() { ctrl.claimControllerStopCh = make(chan struct{}) go ctrl.claimController.Run(ctrl.claimControllerStopCh) } + + if ctrl.classReflectorStopCh == nil { + ctrl.classReflectorStopCh = make(chan struct{}) + go ctrl.classReflector.RunUntil(ctrl.classReflectorStopCh) + } } // Stop gracefully shuts down this controller @@ -440,6 +463,7 @@ func (ctrl *PersistentVolumeController) Stop() { glog.V(4).Infof("stopping PersistentVolumeController") close(ctrl.volumeControllerStopCh) close(ctrl.claimControllerStopCh) + close(ctrl.classReflectorStopCh) } const ( diff --git a/pkg/controller/volume/persistentvolume/controller_test.go b/pkg/controller/volume/persistentvolume/controller_test.go index 638fae112f51a..76e2ddfbe3e8f 100644 --- a/pkg/controller/volume/persistentvolume/controller_test.go +++ b/pkg/controller/volume/persistentvolume/controller_test.go @@ -164,7 +164,7 @@ func TestControllerSync(t *testing.T) { client := &fake.Clientset{} volumeSource := framework.NewFakePVControllerSource() claimSource := framework.NewFakePVCControllerSource() - ctrl := newTestController(client, volumeSource, claimSource, true) + ctrl := newTestController(client, volumeSource, claimSource, nil, true) reactor := newVolumeReactor(client, ctrl, volumeSource, claimSource, test.errors) for _, claim := range test.initialClaims { claimSource.Add(claim) diff --git a/pkg/controller/volume/persistentvolume/framework_test.go b/pkg/controller/volume/persistentvolume/framework_test.go index eeef10124e8d6..6a7a8954eba17 100644 --- a/pkg/controller/volume/persistentvolume/framework_test.go +++ b/pkg/controller/volume/persistentvolume/framework_test.go @@ -555,7 +555,7 @@ func newVolumeReactor(client 
*fake.Clientset, ctrl *PersistentVolumeController, return reactor } -func newTestController(kubeClient clientset.Interface, volumeSource, claimSource cache.ListerWatcher, enableDynamicProvisioning bool) *PersistentVolumeController { +func newTestController(kubeClient clientset.Interface, volumeSource, claimSource, classSource cache.ListerWatcher, enableDynamicProvisioning bool) *PersistentVolumeController { if volumeSource == nil { volumeSource = framework.NewFakePVControllerSource() } @@ -571,6 +571,7 @@ func newTestController(kubeClient clientset.Interface, volumeSource, claimSource "", volumeSource, claimSource, + classSource, record.NewFakeRecorder(1000), // event recorder enableDynamicProvisioning, ) @@ -839,7 +840,7 @@ func runSyncTests(t *testing.T, tests []controllerTest) { // Initialize the controller client := &fake.Clientset{} - ctrl := newTestController(client, nil, nil, true) + ctrl := newTestController(client, nil, nil, nil, true) reactor := newVolumeReactor(client, ctrl, nil, nil, test.errors) for _, claim := range test.initialClaims { ctrl.claims.Add(claim) @@ -883,7 +884,7 @@ func runMultisyncTests(t *testing.T, tests []controllerTest) { // Initialize the controller client := &fake.Clientset{} - ctrl := newTestController(client, nil, nil, true) + ctrl := newTestController(client, nil, nil, nil, true) reactor := newVolumeReactor(client, ctrl, nil, nil, test.errors) for _, claim := range test.initialClaims { ctrl.claims.Add(claim) diff --git a/pkg/controller/volume/persistentvolume/provision_test.go b/pkg/controller/volume/persistentvolume/provision_test.go index e9a8d0d8f54e2..28f044f6c412c 100644 --- a/pkg/controller/volume/persistentvolume/provision_test.go +++ b/pkg/controller/volume/persistentvolume/provision_test.go @@ -252,7 +252,7 @@ func TestProvisionMultiSync(t *testing.T) { // When provisioning is disabled, provisioning a claim should instantly return nil func TestDisablingDynamicProvisioner(t *testing.T) { - ctrl := newTestController(nil, 
nil, nil, false)
+	ctrl := newTestController(nil, nil, nil, nil, false)
 	retVal := ctrl.provisionClaim(nil)
 	if retVal != nil {
 		t.Errorf("Expected nil return but got %v", retVal)
diff --git a/pkg/volume/plugins.go b/pkg/volume/plugins.go
index 669aff661b69e..a9552571fad42 100644
--- a/pkg/volume/plugins.go
+++ b/pkg/volume/plugins.go
@@ -25,6 +25,7 @@ import (
 	"github.com/golang/glog"
 	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/api/resource"
+	"k8s.io/kubernetes/pkg/api/unversioned"
 	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
 	"k8s.io/kubernetes/pkg/cloudprovider"
 	"k8s.io/kubernetes/pkg/types"
@@ -55,6 +56,10 @@ type VolumeOptions struct {
 	ClusterName string
 	// Tags to attach to the real volume in the cloud provider - e.g. AWS EBS
 	CloudTags *map[string]string
+	// Volume provisioning parameters from StorageClass
+	ProvisionerParameters map[string]string
+	// Volume selector from PersistentVolumeClaim
+	ProvisionerSelector *unversioned.LabelSelector
 }
 
 // VolumePlugin is an interface to volume plugins that can be used on a
@@ -419,7 +424,20 @@ func (pm *VolumePluginMgr) FindRecyclablePluginBySpec(spec *Spec) (RecyclableVol
 	return nil, fmt.Errorf("no recyclable volume plugin matched")
 }
 
-// FindDeletablePluginByName fetches a persistent volume plugin by name. If
+// FindProvisionablePluginByName fetches a provisionable volume plugin by name.
+// If no plugin is found, returns error.
+func (pm *VolumePluginMgr) FindProvisionablePluginByName(name string) (ProvisionableVolumePlugin, error) {
+	volumePlugin, err := pm.FindPluginByName(name)
+	if err != nil {
+		return nil, err
+	}
+	if provisionableVolumePlugin, ok := volumePlugin.(ProvisionableVolumePlugin); ok {
+		return provisionableVolumePlugin, nil
+	}
+	return nil, fmt.Errorf("no provisionable volume plugin matched")
+}
+
+// FindDeletablePluginBySpec fetches a persistent volume plugin by spec. If
 // no plugin is found, returns error.
func (pm *VolumePluginMgr) FindDeletablePluginBySpec(spec *Spec) (DeletableVolumePlugin, error) { volumePlugin, err := pm.FindPluginBySpec(spec) @@ -432,6 +448,19 @@ func (pm *VolumePluginMgr) FindDeletablePluginBySpec(spec *Spec) (DeletableVolum return nil, fmt.Errorf("no deletable volume plugin matched") } +// FindDeletablePluginByName fetches a persistent volume plugin by name. If +// no plugin is found, returns error. +func (pm *VolumePluginMgr) FindDeletablePluginByName(name string) (DeletableVolumePlugin, error) { + volumePlugin, err := pm.FindPluginByName(name) + if err != nil { + return nil, err + } + if deletableVolumePlugin, ok := volumePlugin.(DeletableVolumePlugin); ok { + return deletableVolumePlugin, nil + } + return nil, fmt.Errorf("no deletable volume plugin matched") +} + // FindCreatablePluginBySpec fetches a persistent volume plugin by name. If // no plugin is found, returns error. func (pm *VolumePluginMgr) FindCreatablePluginBySpec(spec *Spec) (ProvisionableVolumePlugin, error) { diff --git a/test/integration/persistentvolumes/persistent_volumes_test.go b/test/integration/persistentvolumes/persistent_volumes_test.go index 17d1f07cd3737..62ff9b6e91a20 100644 --- a/test/integration/persistentvolumes/persistent_volumes_test.go +++ b/test/integration/persistentvolumes/persistent_volumes_test.go @@ -900,7 +900,7 @@ func createClients(ns *api.Namespace, t *testing.T, s *httptest.Server) (*client cloud := &fake_cloud.FakeCloud{} syncPeriod := getSyncPeriod() - ctrl := persistentvolumecontroller.NewPersistentVolumeController(binderClient, syncPeriod, plugin, plugins, cloud, "", nil, nil, nil, true) + ctrl := persistentvolumecontroller.NewPersistentVolumeController(binderClient, syncPeriod, plugins, cloud, "", nil, nil, nil, nil, true, "") watchPV, err := testClient.PersistentVolumes().Watch(api.ListOptions{}) if err != nil { From 0a836e1783756752baf3c181922857db654c765d Mon Sep 17 00:00:00 2001 From: Jan Safranek Date: Fri, 15 Jul 2016 14:56:06 +0200 
Subject: [PATCH 05/11] Add controller unit tests. --- .../volume/persistentvolume/binder_test.go | 5 +- .../persistentvolume/controller_test.go | 2 +- .../volume/persistentvolume/delete_test.go | 21 +-- .../volume/persistentvolume/framework_test.go | 136 ++++++++++++------ .../volume/persistentvolume/provision_test.go | 123 +++++++++++++--- .../volume/persistentvolume/recycle_test.go | 23 +-- 6 files changed, 217 insertions(+), 93 deletions(-) diff --git a/pkg/controller/volume/persistentvolume/binder_test.go b/pkg/controller/volume/persistentvolume/binder_test.go index 4d8ea0ee1ab00..59a283a27b654 100644 --- a/pkg/controller/volume/persistentvolume/binder_test.go +++ b/pkg/controller/volume/persistentvolume/binder_test.go @@ -20,6 +20,7 @@ import ( "testing" "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/apis/extensions" ) // Test single call to syncClaim and syncVolume methods. @@ -422,7 +423,7 @@ func TestSync(t *testing.T) { noevents, noerrors, testSyncVolume, }, } - runSyncTests(t, tests) + runSyncTests(t, tests, []*extensions.StorageClass{}, "") } // Test multiple calls to syncClaim/syncVolume and periodic sync of all @@ -469,5 +470,5 @@ func TestMultiSync(t *testing.T) { }, } - runMultisyncTests(t, tests) + runMultisyncTests(t, tests, []*extensions.StorageClass{}, "") } diff --git a/pkg/controller/volume/persistentvolume/controller_test.go b/pkg/controller/volume/persistentvolume/controller_test.go index 76e2ddfbe3e8f..34fb3780816fc 100644 --- a/pkg/controller/volume/persistentvolume/controller_test.go +++ b/pkg/controller/volume/persistentvolume/controller_test.go @@ -164,7 +164,7 @@ func TestControllerSync(t *testing.T) { client := &fake.Clientset{} volumeSource := framework.NewFakePVControllerSource() claimSource := framework.NewFakePVCControllerSource() - ctrl := newTestController(client, volumeSource, claimSource, nil, true) + ctrl := newTestController(client, volumeSource, claimSource, nil, true, "") reactor := newVolumeReactor(client, ctrl, 
volumeSource, claimSource, test.errors) for _, claim := range test.initialClaims { claimSource.Add(claim) diff --git a/pkg/controller/volume/persistentvolume/delete_test.go b/pkg/controller/volume/persistentvolume/delete_test.go index c134c0bfc9a09..6cb4ad0943edc 100644 --- a/pkg/controller/volume/persistentvolume/delete_test.go +++ b/pkg/controller/volume/persistentvolume/delete_test.go @@ -21,6 +21,7 @@ import ( "testing" "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/apis/extensions" ) // Test single call to syncVolume, expecting recycling to happen. @@ -39,7 +40,7 @@ func TestDeleteSync(t *testing.T) { noevents, noerrors, // Inject deleter into the controller and call syncVolume. The // deleter simulates one delete() call that succeeds. - wrapTestWithControllerConfig(operationDelete, []error{nil}, testSyncVolume), + wrapTestWithReclaimCalls(operationDelete, []error{nil}, testSyncVolume), }, { // delete volume bound by user @@ -51,7 +52,7 @@ func TestDeleteSync(t *testing.T) { noevents, noerrors, // Inject deleter into the controller and call syncVolume. The // deleter simulates one delete() call that succeeds. 
- wrapTestWithControllerConfig(operationDelete, []error{nil}, testSyncVolume), + wrapTestWithReclaimCalls(operationDelete, []error{nil}, testSyncVolume), }, { // delete failure - plugin not found @@ -70,7 +71,7 @@ func TestDeleteSync(t *testing.T) { noclaims, noclaims, []string{"Warning VolumeFailedDelete"}, noerrors, - wrapTestWithControllerConfig(operationDelete, []error{}, testSyncVolume), + wrapTestWithReclaimCalls(operationDelete, []error{}, testSyncVolume), }, { // delete failure - delete() returns error @@ -80,7 +81,7 @@ func TestDeleteSync(t *testing.T) { noclaims, noclaims, []string{"Warning VolumeFailedDelete"}, noerrors, - wrapTestWithControllerConfig(operationDelete, []error{errors.New("Mock delete error")}, testSyncVolume), + wrapTestWithReclaimCalls(operationDelete, []error{errors.New("Mock delete error")}, testSyncVolume), }, { // delete success(?) - volume is deleted before doDelete() starts @@ -90,7 +91,7 @@ func TestDeleteSync(t *testing.T) { noclaims, noclaims, noevents, noerrors, - wrapTestWithInjectedOperation(wrapTestWithControllerConfig(operationDelete, []error{}, testSyncVolume), func(ctrl *PersistentVolumeController, reactor *volumeReactor) { + wrapTestWithInjectedOperation(wrapTestWithReclaimCalls(operationDelete, []error{}, testSyncVolume), func(ctrl *PersistentVolumeController, reactor *volumeReactor) { // Delete the volume before delete operation starts reactor.lock.Lock() delete(reactor.volumes, "volume8-6") @@ -107,7 +108,7 @@ func TestDeleteSync(t *testing.T) { noclaims, newClaimArray("claim8-7", "uid8-7", "10Gi", "volume8-7", api.ClaimBound), noevents, noerrors, - wrapTestWithInjectedOperation(wrapTestWithControllerConfig(operationDelete, []error{}, testSyncVolume), func(ctrl *PersistentVolumeController, reactor *volumeReactor) { + wrapTestWithInjectedOperation(wrapTestWithReclaimCalls(operationDelete, []error{}, testSyncVolume), func(ctrl *PersistentVolumeController, reactor *volumeReactor) { reactor.lock.Lock() defer 
reactor.lock.Unlock() // Bind the volume to resurrected claim (this should never @@ -130,10 +131,10 @@ func TestDeleteSync(t *testing.T) { noevents, noerrors, // Inject deleter into the controller and call syncVolume. The // deleter simulates one delete() call that succeeds. - wrapTestWithControllerConfig(operationDelete, []error{nil}, testSyncVolume), + wrapTestWithReclaimCalls(operationDelete, []error{nil}, testSyncVolume), }, } - runSyncTests(t, tests) + runSyncTests(t, tests, []*extensions.StorageClass{}, "") } // Test multiple calls to syncClaim/syncVolume and periodic sync of all @@ -161,9 +162,9 @@ func TestDeleteMultiSync(t *testing.T) { noclaims, noclaims, []string{"Warning VolumeFailedDelete"}, noerrors, - wrapTestWithControllerConfig(operationDelete, []error{errors.New("Mock delete error"), nil}, testSyncVolume), + wrapTestWithReclaimCalls(operationDelete, []error{errors.New("Mock delete error"), nil}, testSyncVolume), }, } - runMultisyncTests(t, tests) + runMultisyncTests(t, tests, []*extensions.StorageClass{}, "") } diff --git a/pkg/controller/volume/persistentvolume/framework_test.go b/pkg/controller/volume/persistentvolume/framework_test.go index 6a7a8954eba17..f16e561dd4910 100644 --- a/pkg/controller/volume/persistentvolume/framework_test.go +++ b/pkg/controller/volume/persistentvolume/framework_test.go @@ -33,6 +33,7 @@ import ( "k8s.io/kubernetes/pkg/api/resource" "k8s.io/kubernetes/pkg/api/testapi" "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/apis/extensions" "k8s.io/kubernetes/pkg/client/cache" clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake" @@ -555,7 +556,7 @@ func newVolumeReactor(client *fake.Clientset, ctrl *PersistentVolumeController, return reactor } -func newTestController(kubeClient clientset.Interface, volumeSource, claimSource, classSource cache.ListerWatcher, enableDynamicProvisioning bool) 
*PersistentVolumeController { +func newTestController(kubeClient clientset.Interface, volumeSource, claimSource, classSource cache.ListerWatcher, enableDynamicProvisioning bool, defaultStorageClass string) *PersistentVolumeController { if volumeSource == nil { volumeSource = framework.NewFakePVControllerSource() } @@ -565,7 +566,6 @@ func newTestController(kubeClient clientset.Interface, volumeSource, claimSource ctrl := NewPersistentVolumeController( kubeClient, 5*time.Second, // sync period - nil, // provisioner []vol.VolumePlugin{}, // recyclers nil, // cloud "", @@ -574,6 +574,7 @@ func newTestController(kubeClient clientset.Interface, volumeSource, claimSource classSource, record.NewFakeRecorder(1000), // event recorder enableDynamicProvisioning, + defaultStorageClass, ) // Speed up the test @@ -581,27 +582,6 @@ func newTestController(kubeClient clientset.Interface, volumeSource, claimSource return ctrl } -func addRecyclePlugin(ctrl *PersistentVolumeController, expectedRecycleCalls []error) { - plugin := &mockVolumePlugin{ - recycleCalls: expectedRecycleCalls, - } - ctrl.recyclePluginMgr.InitPlugins([]vol.VolumePlugin{plugin}, ctrl) -} - -func addDeletePlugin(ctrl *PersistentVolumeController, expectedDeleteCalls []error) { - plugin := &mockVolumePlugin{ - deleteCalls: expectedDeleteCalls, - } - ctrl.recyclePluginMgr.InitPlugins([]vol.VolumePlugin{plugin}, ctrl) -} - -func addProvisionPlugin(ctrl *PersistentVolumeController, expectedDeleteCalls []error) { - plugin := &mockVolumePlugin{ - provisionCalls: expectedDeleteCalls, - } - ctrl.provisioner = plugin -} - // newVolume returns a new volume with given attributes func newVolume(name, capacity, boundToClaimUID, boundToClaimName string, phase api.PersistentVolumePhase, reclaimPolicy api.PersistentVolumeReclaimPolicy, annotations ...string) *api.PersistentVolume { volume := api.PersistentVolume{ @@ -678,6 +658,17 @@ func withMessage(message string, volumes []*api.PersistentVolume) []*api.Persist return volumes } 
+// volumeWithClass saves given class into annClass annotation. +// Meant to be used to compose claims specified inline in a test. +func volumeWithClass(className string, volumes []*api.PersistentVolume) []*api.PersistentVolume { + if volumes[0].Annotations == nil { + volumes[0].Annotations = map[string]string{annClass: className} + } else { + volumes[0].Annotations[annClass] = className + } + return volumes +} + // newVolumeArray returns array with a single volume that would be returned by // newVolume() with the same parameters. func newVolumeArray(name, capacity, boundToClaimUID, boundToClaimName string, phase api.PersistentVolumePhase, reclaimPolicy api.PersistentVolumeReclaimPolicy, annotations ...string) []*api.PersistentVolume { @@ -733,6 +724,17 @@ func newClaimArray(name, claimUID, capacity, boundToVolume string, phase api.Per } } +// claimWithClass saves given class into annClass annotation. +// Meant to be used to compose claims specified inline in a test. +func claimWithClass(className string, claims []*api.PersistentVolumeClaim) []*api.PersistentVolumeClaim { + if claims[0].Annotations == nil { + claims[0].Annotations = map[string]string{annClass: className} + } else { + claims[0].Annotations[annClass] = className + } + return claims +} + func testSyncClaim(ctrl *PersistentVolumeController, reactor *volumeReactor, test controllerTest) error { return ctrl.syncClaim(test.initialClaims[0]) } @@ -754,29 +756,45 @@ type operationType string const operationDelete = "Delete" const operationRecycle = "Recycle" -const operationProvision = "Provision" -// wrapTestWithControllerConfig returns a testCall that: -// - configures controller with recycler, deleter or provisioner which will -// return provided errors when a volume is deleted, recycled or provisioned +// wrapTestWithReclaimCalls returns a testCall that: +// - configures controller with a volume plugin that implements recycler, +// deleter and provisioner. 
The plugin retunrs provided errors when a volume +// is deleted, recycled or provisioned. // - calls given testCall -func wrapTestWithControllerConfig(operation operationType, expectedOperationCalls []error, toWrap testCall) testCall { - expected := expectedOperationCalls - +func wrapTestWithPluginCalls(expectedRecycleCalls, expectedDeleteCalls []error, expectedProvisionCalls []provisionCall, toWrap testCall) testCall { return func(ctrl *PersistentVolumeController, reactor *volumeReactor, test controllerTest) error { - switch operation { - case operationDelete: - addDeletePlugin(ctrl, expected) - case operationRecycle: - addRecyclePlugin(ctrl, expected) - case operationProvision: - addProvisionPlugin(ctrl, expected) + plugin := &mockVolumePlugin{ + recycleCalls: expectedRecycleCalls, + deleteCalls: expectedDeleteCalls, + provisionCalls: expectedProvisionCalls, } + ctrl.volumePluginMgr.InitPlugins([]vol.VolumePlugin{plugin}, ctrl) return toWrap(ctrl, reactor, test) } } +// wrapTestWithReclaimCalls returns a testCall that: +// - configures controller with recycler or deleter which will return provided +// errors when a volume is deleted or recycled +// - calls given testCall +func wrapTestWithReclaimCalls(operation operationType, expectedOperationCalls []error, toWrap testCall) testCall { + if operation == operationDelete { + return wrapTestWithPluginCalls(nil, expectedOperationCalls, nil, toWrap) + } else { + return wrapTestWithPluginCalls(expectedOperationCalls, nil, nil, toWrap) + } +} + +// wrapTestWithProvisionCalls returns a testCall that: +// - configures controller with a provisioner which will return provided errors +// when a claim is provisioned +// - calls given testCall +func wrapTestWithProvisionCalls(expectedProvisionCalls []provisionCall, toWrap testCall) testCall { + return wrapTestWithPluginCalls(nil, nil, expectedProvisionCalls, toWrap) +} + // wrapTestWithInjectedOperation returns a testCall that: // - starts the controller and lets it run 
original testCall until // scheduleOperation() call. It blocks the controller there and calls the @@ -834,13 +852,13 @@ func evaluateTestResults(ctrl *PersistentVolumeController, reactor *volumeReacto // 2. Call the tested function (syncClaim/syncVolume) via // controllerTest.testCall *once*. // 3. Compare resulting volumes and claims with expected volumes and claims. -func runSyncTests(t *testing.T, tests []controllerTest) { +func runSyncTests(t *testing.T, tests []controllerTest, storageClasses []*extensions.StorageClass, defaultStorageClass string) { for _, test := range tests { glog.V(4).Infof("starting test %q", test.name) // Initialize the controller client := &fake.Clientset{} - ctrl := newTestController(client, nil, nil, nil, true) + ctrl := newTestController(client, nil, nil, nil, true, defaultStorageClass) reactor := newVolumeReactor(client, ctrl, nil, nil, test.errors) for _, claim := range test.initialClaims { ctrl.claims.Add(claim) @@ -851,6 +869,14 @@ func runSyncTests(t *testing.T, tests []controllerTest) { reactor.volumes[volume.Name] = volume } + // Convert classes to []interface{} and forcefuly inject them to the + // controller. + storageClassPtrs := make([]interface{}, len(storageClasses)) + for i, s := range storageClasses { + storageClassPtrs[i] = s + } + ctrl.classes.Replace(storageClassPtrs, "1") + // Run the tested functions err := test.test(ctrl, reactor, test) if err != nil { @@ -878,13 +904,22 @@ func runSyncTests(t *testing.T, tests []controllerTest) { // 5. When 3. does not do any changes, finish the tests and compare final set // of volumes/claims with expected claims/volumes and report differences. // Some limit of calls in enforced to prevent endless loops. 
-func runMultisyncTests(t *testing.T, tests []controllerTest) { +func runMultisyncTests(t *testing.T, tests []controllerTest, storageClasses []*extensions.StorageClass, defaultStorageClass string) { for _, test := range tests { glog.V(4).Infof("starting multisync test %q", test.name) // Initialize the controller client := &fake.Clientset{} - ctrl := newTestController(client, nil, nil, nil, true) + ctrl := newTestController(client, nil, nil, nil, true, defaultStorageClass) + + // Convert classes to []interface{} and forcefuly inject them to the + // controller. + storageClassPtrs := make([]interface{}, len(storageClasses)) + for i, s := range storageClasses { + storageClassPtrs[i] = s + } + ctrl.classes.Replace(storageClassPtrs, "1") + reactor := newVolumeReactor(client, ctrl, nil, nil, test.errors) for _, claim := range test.initialClaims { ctrl.claims.Add(claim) @@ -980,7 +1015,7 @@ func runMultisyncTests(t *testing.T, tests []controllerTest) { // Dummy volume plugin for provisioning, deletion and recycling. It contains // lists of expected return values to simulate errors. 
type mockVolumePlugin struct { - provisionCalls []error + provisionCalls []provisionCall provisionCallCounter int deleteCalls []error deleteCallCounter int @@ -989,6 +1024,11 @@ type mockVolumePlugin struct { provisionOptions vol.VolumeOptions } +type provisionCall struct { + expectedProvisionerParameters map[string]string + ret error +} + var _ vol.VolumePlugin = &mockVolumePlugin{} var _ vol.RecyclableVolumePlugin = &mockVolumePlugin{} var _ vol.DeletableVolumePlugin = &mockVolumePlugin{} @@ -1041,8 +1081,12 @@ func (plugin *mockVolumePlugin) Provision() (*api.PersistentVolume, error) { } var pv *api.PersistentVolume - err := plugin.provisionCalls[plugin.provisionCallCounter] - if err == nil { + call := plugin.provisionCalls[plugin.provisionCallCounter] + if !reflect.DeepEqual(call.expectedProvisionerParameters, plugin.provisionOptions.ProvisionerParameters) { + glog.Errorf("invalid provisioner call, expected options: %+v, got: %+v", call.expectedProvisionerParameters, plugin.provisionOptions.ProvisionerParameters) + return nil, fmt.Errorf("Mock plugin error: invalid provisioner call") + } + if call.ret == nil { // Create a fake PV with known GCE volume (to match expected volume) pv = &api.PersistentVolume{ ObjectMeta: api.ObjectMeta{ @@ -1062,8 +1106,8 @@ func (plugin *mockVolumePlugin) Provision() (*api.PersistentVolume, error) { } plugin.provisionCallCounter++ - glog.V(4).Infof("mock plugin Provision call nr. %d, returning %v: %v", plugin.provisionCallCounter, pv, err) - return pv, err + glog.V(4).Infof("mock plugin Provision call nr. 
%d, returning %v: %v", plugin.provisionCallCounter, pv, call.ret) + return pv, call.ret } // Deleter interfaces diff --git a/pkg/controller/volume/persistentvolume/provision_test.go b/pkg/controller/volume/persistentvolume/provision_test.go index 28f044f6c412c..52738f33fc0f8 100644 --- a/pkg/controller/volume/persistentvolume/provision_test.go +++ b/pkg/controller/volume/persistentvolume/provision_test.go @@ -21,8 +21,59 @@ import ( "testing" "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/apis/extensions" ) +var class1Parameters = map[string]string{ + "param1": "value1", +} +var class2Parameters = map[string]string{ + "param2": "value2", +} +var storageClasses = []*extensions.StorageClass{ + { + TypeMeta: unversioned.TypeMeta{ + Kind: "StorageClass", + }, + + ObjectMeta: api.ObjectMeta{ + Name: "gold", + }, + + ProvisionerType: mockPluginName, + ProvisionerParameters: class1Parameters, + }, + { + TypeMeta: unversioned.TypeMeta{ + Kind: "StorageClass", + }, + ObjectMeta: api.ObjectMeta{ + Name: "silver", + }, + ProvisionerType: mockPluginName, + ProvisionerParameters: class2Parameters, + }, +} + +// call to storageClass 1, returning an error +var provision1Error = provisionCall{ + ret: errors.New("Moc provisioner error"), + expectedProvisionerParameters: class1Parameters, +} + +// call to storageClass 1, returning a valid PV +var provision1Success = provisionCall{ + ret: nil, + expectedProvisionerParameters: class1Parameters, +} + +// call to storageClass 2, returning a valid PV +var provision2Success = provisionCall{ + ret: nil, + expectedProvisionerParameters: class2Parameters, +} + // Test single call to syncVolume, expecting provisioning to happen. // 1. Fill in the controller with initial data // 2. Call the syncVolume *once*. 
@@ -30,14 +81,14 @@ import ( func TestProvisionSync(t *testing.T) { tests := []controllerTest{ { - // Provision a volume - "11-1 - successful provision", + // Provision a volume (with the default class) + "11-1 - successful provision with storage class 1", novolumes, newVolumeArray("pvc-uid11-1", "1Gi", "uid11-1", "claim11-1", api.VolumeBound, api.PersistentVolumeReclaimDelete, annBoundByController, annDynamicallyProvisioned, annClass), newClaimArray("claim11-1", "uid11-1", "1Gi", "", api.ClaimPending, annClass), // Binding will be completed in the next syncClaim newClaimArray("claim11-1", "uid11-1", "1Gi", "", api.ClaimPending, annClass), - noevents, noerrors, wrapTestWithControllerConfig(operationProvision, []error{nil}, testSyncClaim), + noevents, noerrors, wrapTestWithProvisionCalls([]provisionCall{provision1Success}, testSyncClaim), }, { // Provision failure - plugin not found @@ -57,7 +108,7 @@ func TestProvisionSync(t *testing.T) { newClaimArray("claim11-3", "uid11-3", "1Gi", "", api.ClaimPending, annClass), newClaimArray("claim11-3", "uid11-3", "1Gi", "", api.ClaimPending, annClass), []string{"Warning ProvisioningFailed"}, noerrors, - wrapTestWithControllerConfig(operationProvision, []error{}, testSyncClaim), + wrapTestWithProvisionCalls([]provisionCall{}, testSyncClaim), }, { // Provision failure - Provision returns error @@ -67,7 +118,7 @@ func TestProvisionSync(t *testing.T) { newClaimArray("claim11-4", "uid11-4", "1Gi", "", api.ClaimPending, annClass), newClaimArray("claim11-4", "uid11-4", "1Gi", "", api.ClaimPending, annClass), []string{"Warning ProvisioningFailed"}, noerrors, - wrapTestWithControllerConfig(operationProvision, []error{errors.New("Moc provisioner error")}, testSyncClaim), + wrapTestWithProvisionCalls([]provisionCall{provision1Error}, testSyncClaim), }, { // No provisioning if there is a matching volume available @@ -79,7 +130,7 @@ func TestProvisionSync(t *testing.T) { noevents, noerrors, // No provisioning plugin confingure - makes the 
test fail when // the controller errorneously tries to provision something - wrapTestWithControllerConfig(operationProvision, []error{nil}, testSyncClaim), + wrapTestWithProvisionCalls([]provisionCall{provision1Success}, testSyncClaim), }, { // Provision success? - claim is bound before provisioner creates @@ -91,7 +142,7 @@ func TestProvisionSync(t *testing.T) { // The claim would be bound in next syncClaim newClaimArray("claim11-7", "uid11-7", "1Gi", "", api.ClaimPending, annClass), noevents, noerrors, - wrapTestWithInjectedOperation(wrapTestWithControllerConfig(operationProvision, []error{}, testSyncClaim), func(ctrl *PersistentVolumeController, reactor *volumeReactor) { + wrapTestWithInjectedOperation(wrapTestWithProvisionCalls([]provisionCall{}, testSyncClaim), func(ctrl *PersistentVolumeController, reactor *volumeReactor) { // Create a volume before provisionClaimOperation starts. // This similates a parallel controller provisioning the volume. reactor.lock.Lock() @@ -116,7 +167,7 @@ func TestProvisionSync(t *testing.T) { // will succeed. {"create", "persistentvolumes", errors.New("Mock creation error")}, }, - wrapTestWithControllerConfig(operationProvision, []error{nil}, testSyncClaim), + wrapTestWithProvisionCalls([]provisionCall{provision1Success}, testSyncClaim), }, { // Provision success? 
- cannot save provisioned PV five times, @@ -136,8 +187,12 @@ func TestProvisionSync(t *testing.T) { {"create", "persistentvolumes", errors.New("Mock creation error4")}, {"create", "persistentvolumes", errors.New("Mock creation error5")}, }, - wrapTestWithControllerConfig(operationDelete, []error{nil}, - wrapTestWithControllerConfig(operationProvision, []error{nil}, testSyncClaim)), + wrapTestWithPluginCalls( + nil, // recycle calls + []error{nil}, // delete calls + []provisionCall{provision1Success}, // provision calls + testSyncClaim, + ), }, { // Provision failure - cannot save provisioned PV five times, @@ -158,7 +213,7 @@ func TestProvisionSync(t *testing.T) { {"create", "persistentvolumes", errors.New("Mock creation error5")}, }, // No deleteCalls are configured, which results into no deleter plugin available for the volume - wrapTestWithControllerConfig(operationProvision, []error{nil}, testSyncClaim), + wrapTestWithProvisionCalls([]provisionCall{provision1Success}, testSyncClaim), }, { // Provision failure - cannot save provisioned PV five times, @@ -178,16 +233,17 @@ func TestProvisionSync(t *testing.T) { {"create", "persistentvolumes", errors.New("Mock creation error4")}, {"create", "persistentvolumes", errors.New("Mock creation error5")}, }, - wrapTestWithControllerConfig( - operationDelete, []error{ + wrapTestWithPluginCalls( + nil, // recycle calls + []error{ // delete calls errors.New("Mock deletion error1"), errors.New("Mock deletion error2"), errors.New("Mock deletion error3"), errors.New("Mock deletion error4"), errors.New("Mock deletion error5"), }, - wrapTestWithControllerConfig(operationProvision, []error{nil}, testSyncClaim), - ), + []provisionCall{provision1Success}, // provision calls + testSyncClaim), }, { // Provision failure - cannot save provisioned PV five times, @@ -207,16 +263,37 @@ func TestProvisionSync(t *testing.T) { {"create", "persistentvolumes", errors.New("Mock creation error4")}, {"create", "persistentvolumes", 
errors.New("Mock creation error5")}, }, - wrapTestWithControllerConfig( - operationDelete, []error{ + wrapTestWithPluginCalls( + nil, // recycle calls + []error{ // delete calls errors.New("Mock deletion error1"), nil, - }, - wrapTestWithControllerConfig(operationProvision, []error{nil}, testSyncClaim), + }, // provison calls + []provisionCall{provision1Success}, + testSyncClaim, ), }, + { + // Provision a volume (with non-default class) + "11-13 - successful provision with storage class 2", + novolumes, + volumeWithClass("silver", newVolumeArray("pvc-uid11-13", "1Gi", "uid11-13", "claim11-13", api.VolumeBound, api.PersistentVolumeReclaimDelete, annBoundByController, annDynamicallyProvisioned)), + claimWithClass("silver", newClaimArray("claim11-13", "uid11-13", "1Gi", "", api.ClaimPending)), + // Binding will be completed in the next syncClaim + claimWithClass("silver", newClaimArray("claim11-13", "uid11-13", "1Gi", "", api.ClaimPending)), + noevents, noerrors, wrapTestWithProvisionCalls([]provisionCall{provision2Success}, testSyncClaim), + }, + { + // Provision error - non existing class + "11-14 - fail due to non-existing class", + novolumes, + novolumes, + claimWithClass("non-existing", newClaimArray("claim11-14", "uid11-14", "1Gi", "", api.ClaimPending)), + claimWithClass("non-existing", newClaimArray("claim11-14", "uid11-14", "1Gi", "", api.ClaimPending)), + noevents, noerrors, wrapTestWithProvisionCalls([]provisionCall{}, testSyncClaim), + }, } - runSyncTests(t, tests) + runSyncTests(t, tests, storageClasses, storageClasses[0].Name) } // Test multiple calls to syncClaim/syncVolume and periodic sync of all @@ -243,16 +320,16 @@ func TestProvisionMultiSync(t *testing.T) { newClaimArray("claim12-1", "uid12-1", "1Gi", "", api.ClaimPending, annClass), // Binding will be completed in the next syncClaim newClaimArray("claim12-1", "uid12-1", "1Gi", "pvc-uid12-1", api.ClaimBound, annClass, annBoundByController, annBindCompleted), - noevents, noerrors, 
wrapTestWithControllerConfig(operationProvision, []error{nil}, testSyncClaim), + noevents, noerrors, wrapTestWithProvisionCalls([]provisionCall{provision1Success}, testSyncClaim), }, } - runMultisyncTests(t, tests) + runMultisyncTests(t, tests, storageClasses, storageClasses[0].Name) } // When provisioning is disabled, provisioning a claim should instantly return nil func TestDisablingDynamicProvisioner(t *testing.T) { - ctrl := newTestController(nil, nil, nil, nil, false) + ctrl := newTestController(nil, nil, nil, nil, false, "") retVal := ctrl.provisionClaim(nil) if retVal != nil { t.Errorf("Expected nil return but got %v", retVal) diff --git a/pkg/controller/volume/persistentvolume/recycle_test.go b/pkg/controller/volume/persistentvolume/recycle_test.go index c6fde8b3fe140..1e6443bbc7bb5 100644 --- a/pkg/controller/volume/persistentvolume/recycle_test.go +++ b/pkg/controller/volume/persistentvolume/recycle_test.go @@ -21,6 +21,7 @@ import ( "testing" "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/apis/extensions" ) // Test single call to syncVolume, expecting recycling to happen. @@ -39,7 +40,7 @@ func TestRecycleSync(t *testing.T) { noevents, noerrors, // Inject recycler into the controller and call syncVolume. The // recycler simulates one recycle() call that succeeds. - wrapTestWithControllerConfig(operationRecycle, []error{nil}, testSyncVolume), + wrapTestWithReclaimCalls(operationRecycle, []error{nil}, testSyncVolume), }, { // recycle volume bound by user @@ -51,7 +52,7 @@ func TestRecycleSync(t *testing.T) { noevents, noerrors, // Inject recycler into the controller and call syncVolume. The // recycler simulates one recycle() call that succeeds. 
- wrapTestWithControllerConfig(operationRecycle, []error{nil}, testSyncVolume), + wrapTestWithReclaimCalls(operationRecycle, []error{nil}, testSyncVolume), }, { // recycle failure - plugin not found @@ -70,7 +71,7 @@ func TestRecycleSync(t *testing.T) { noclaims, noclaims, []string{"Warning VolumeFailedRecycle"}, noerrors, - wrapTestWithControllerConfig(operationRecycle, []error{}, testSyncVolume), + wrapTestWithReclaimCalls(operationRecycle, []error{}, testSyncVolume), }, { // recycle failure - recycle returns error @@ -80,7 +81,7 @@ func TestRecycleSync(t *testing.T) { noclaims, noclaims, []string{"Warning VolumeFailedRecycle"}, noerrors, - wrapTestWithControllerConfig(operationRecycle, []error{errors.New("Mock recycle error")}, testSyncVolume), + wrapTestWithReclaimCalls(operationRecycle, []error{errors.New("Mock recycle error")}, testSyncVolume), }, { // recycle success(?) - volume is deleted before doRecycle() starts @@ -90,7 +91,7 @@ func TestRecycleSync(t *testing.T) { noclaims, noclaims, noevents, noerrors, - wrapTestWithInjectedOperation(wrapTestWithControllerConfig(operationRecycle, []error{}, testSyncVolume), func(ctrl *PersistentVolumeController, reactor *volumeReactor) { + wrapTestWithInjectedOperation(wrapTestWithReclaimCalls(operationRecycle, []error{}, testSyncVolume), func(ctrl *PersistentVolumeController, reactor *volumeReactor) { // Delete the volume before recycle operation starts reactor.lock.Lock() delete(reactor.volumes, "volume6-6") @@ -107,7 +108,7 @@ func TestRecycleSync(t *testing.T) { noclaims, noclaims, noevents, noerrors, - wrapTestWithInjectedOperation(wrapTestWithControllerConfig(operationRecycle, []error{}, testSyncVolume), func(ctrl *PersistentVolumeController, reactor *volumeReactor) { + wrapTestWithInjectedOperation(wrapTestWithReclaimCalls(operationRecycle, []error{}, testSyncVolume), func(ctrl *PersistentVolumeController, reactor *volumeReactor) { // Mark the volume as Available before the recycler starts reactor.lock.Lock() 
volume := reactor.volumes["volume6-7"] @@ -128,7 +129,7 @@ func TestRecycleSync(t *testing.T) { noclaims, noclaims, noevents, noerrors, - wrapTestWithInjectedOperation(wrapTestWithControllerConfig(operationRecycle, []error{}, testSyncVolume), func(ctrl *PersistentVolumeController, reactor *volumeReactor) { + wrapTestWithInjectedOperation(wrapTestWithReclaimCalls(operationRecycle, []error{}, testSyncVolume), func(ctrl *PersistentVolumeController, reactor *volumeReactor) { // Mark the volume as Available before the recycler starts reactor.lock.Lock() volume := reactor.volumes["volume6-8"] @@ -148,7 +149,7 @@ func TestRecycleSync(t *testing.T) { noevents, noerrors, // Inject recycler into the controller and call syncVolume. The // recycler simulates one recycle() call that succeeds. - wrapTestWithControllerConfig(operationRecycle, []error{nil}, testSyncVolume), + wrapTestWithReclaimCalls(operationRecycle, []error{nil}, testSyncVolume), }, { // volume has unknown reclaim policy - failure expected @@ -160,7 +161,7 @@ func TestRecycleSync(t *testing.T) { []string{"Warning VolumeUnknownReclaimPolicy"}, noerrors, testSyncVolume, }, } - runSyncTests(t, tests) + runSyncTests(t, tests, []*extensions.StorageClass{}, "") } // Test multiple calls to syncClaim/syncVolume and periodic sync of all @@ -188,9 +189,9 @@ func TestRecycleMultiSync(t *testing.T) { noclaims, noclaims, []string{"Warning VolumeFailedRecycle"}, noerrors, - wrapTestWithControllerConfig(operationRecycle, []error{errors.New("Mock recycle error"), nil}, testSyncVolume), + wrapTestWithReclaimCalls(operationRecycle, []error{errors.New("Mock recycle error"), nil}, testSyncVolume), }, } - runMultisyncTests(t, tests) + runMultisyncTests(t, tests, []*extensions.StorageClass{}, "") } From 90893d09b5c2edb7f18a763d997a7c41fc423f97 Mon Sep 17 00:00:00 2001 From: Jan Safranek Date: Fri, 15 Jul 2016 14:56:07 +0200 Subject: [PATCH 06/11] Add AWS plugin --- pkg/cloudprovider/providers/aws/aws.go | 59 
+++++++++++++++++++--- pkg/volume/aws_ebs/aws_util.go | 24 +++++++++++ 2 files changed, 77 insertions(+), 6 deletions(-) diff --git a/pkg/cloudprovider/providers/aws/aws.go b/pkg/cloudprovider/providers/aws/aws.go index e5897123ae0f9..021c4b9270db5 100644 --- a/pkg/cloudprovider/providers/aws/aws.go +++ b/pkg/cloudprovider/providers/aws/aws.go @@ -28,6 +28,8 @@ import ( "sync" "time" + "gopkg.in/gcfg.v1" + "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/credentials" @@ -39,7 +41,6 @@ "github.com/aws/aws-sdk-go/service/ec2" "github.com/aws/aws-sdk-go/service/elb" "github.com/golang/glog" - "gopkg.in/gcfg.v1" "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/service" @@ -208,11 +209,24 @@ type EC2Metadata interface { GetMetadata(path string) (string, error) } +const ( + VolumeTypeIO1 = "io1" + VolumeTypeGP2 = "gp2" + VolumeTypeSC1 = "sc1" + VolumeTypeST1 = "st1" +) + // VolumeOptions specifies capacity and tags for a volume. type VolumeOptions struct { - CapacityGB int - Tags map[string]string - PVCName string + CapacityGB int + Tags map[string]string + PVCName string + VolumeType string + AvailabilityZone string + // IOPSPerGB x CapacityGB will give total IOPS of the volume to create. + // IOPSPerGB must be bigger than zero and smaller or equal to 30. + // Calculated total IOPS will be capped at 20000 IOPS. 
+ IOPSPerGB int } // Volumes is an interface for managing cloud-provisioned volumes @@ -1465,14 +1479,47 @@ func (c *Cloud) CreateDisk(volumeOptions *VolumeOptions) (string, error) { return "", fmt.Errorf("error querying for all zones: %v", err) } - createAZ := volume.ChooseZoneForVolume(allZones, volumeOptions.PVCName) + createAZ := volumeOptions.AvailabilityZone + if createAZ == "" { + createAZ = volume.ChooseZoneForVolume(allZones, volumeOptions.PVCName) + } + + var createType string + var iops int64 + switch volumeOptions.VolumeType { + case VolumeTypeGP2, VolumeTypeSC1, VolumeTypeST1: + createType = volumeOptions.VolumeType + + case VolumeTypeIO1: + // See http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateVolume.html for IOPS constraints + if volumeOptions.IOPSPerGB <= 0 || volumeOptions.IOPSPerGB > 30 { + return "", fmt.Errorf("invalid iopsPerGB value %d, must be 0 < IOPSPerGB <= 30", volumeOptions.IOPSPerGB) + } + createType = volumeOptions.VolumeType + iops = int64(volumeOptions.CapacityGB * volumeOptions.IOPSPerGB) + if iops < 100 { + iops = 100 + } + if iops > 20000 { + iops = 20000 + } + + case "": + createType = DefaultVolumeType + + default: + return "", fmt.Errorf("invalid AWS VolumeType %q", volumeOptions.VolumeType) + } // TODO: Should we tag this with the cluster id (so it gets deleted when the cluster does?) 
request := &ec2.CreateVolumeInput{} request.AvailabilityZone = &createAZ volSize := int64(volumeOptions.CapacityGB) request.Size = &volSize - request.VolumeType = aws.String(DefaultVolumeType) + request.VolumeType = &createType + if iops > 0 { + request.Iops = &iops + } response, err := c.ec2.CreateVolume(request) if err != nil { return "", err diff --git a/pkg/volume/aws_ebs/aws_util.go b/pkg/volume/aws_ebs/aws_util.go index 723d15c337562..67e9d5dbe6605 100644 --- a/pkg/volume/aws_ebs/aws_util.go +++ b/pkg/volume/aws_ebs/aws_util.go @@ -19,6 +19,8 @@ package aws_ebs import ( "fmt" "os" + "strconv" + "strings" "time" "github.com/golang/glog" @@ -84,6 +86,28 @@ func (util *AWSDiskUtil) CreateVolume(c *awsElasticBlockStoreProvisioner) (strin Tags: tags, PVCName: c.options.PVCName, } + // Apply ProvisionerParameters (case-insensitive). We leave validation of + // the values to the cloud provider. + for k, v := range c.options.ProvisionerParameters { + switch strings.ToLower(k) { + case "type": + volumeOptions.VolumeType = v + case "zone": + volumeOptions.AvailabilityZone = v + case "iopspergb": + volumeOptions.IOPSPerGB, err = strconv.Atoi(v) + if err != nil { + return "", 0, nil, fmt.Errorf("invalid iopsPerGB value %q, must be integer between 1 and 30: %v", v, err) + } + default: + return "", 0, nil, fmt.Errorf("invalid option %q for volume plugin %s", k, c.plugin.GetPluginName()) + } + } + + // TODO: implement c.options.ProvisionerSelector parsing + if c.options.ProvisionerSelector != nil { + return "", 0, nil, fmt.Errorf("claim.Spec.Selector is not supported for dynamic provisioning on AWS") + } name, err := cloud.CreateDisk(volumeOptions) if err != nil { From da36c173518be374782ead2113a17f09f1969789 Mon Sep 17 00:00:00 2001 From: Jan Safranek Date: Fri, 15 Jul 2016 14:56:08 +0200 Subject: [PATCH 07/11] Gce --- pkg/cloudprovider/providers/gce/gce.go | 25 +++++++++++++++-- pkg/volume/gce_pd/attacher_test.go | 2 +- pkg/volume/gce_pd/gce_util.go | 37 
++++++++++++++++++++------ test/e2e/pd.go | 2 +- 4 files changed, 54 insertions(+), 12 deletions(-) diff --git a/pkg/cloudprovider/providers/gce/gce.go b/pkg/cloudprovider/providers/gce/gce.go index ea1e644e0e5c2..7ecb28f315b33 100644 --- a/pkg/cloudprovider/providers/gce/gce.go +++ b/pkg/cloudprovider/providers/gce/gce.go @@ -101,6 +101,16 @@ type Config struct { } } +type DiskType string + +const ( + DiskTypeSSD = "pd-ssd" + DiskTypeStandard = "pd-standard" + + diskTypeDefault = DiskTypeStandard + diskTypeUriTemplate = "https://www.googleapis.com/compute/v1/projects/%s/zones/%s/diskTypes/%s" +) + // Disks is interface for manipulation with GCE PDs. type Disks interface { // AttachDisk attaches given disk to given instance. Current instance @@ -116,7 +126,7 @@ type Disks interface { // CreateDisk creates a new PD with given properties. Tags are serialized // as JSON into Description field. - CreateDisk(name string, zone string, sizeGb int64, tags map[string]string) error + CreateDisk(name string, diskType string, zone string, sizeGb int64, tags map[string]string) error // DeleteDisk deletes PD. DeleteDisk(diskToDelete string) error @@ -2260,16 +2270,27 @@ func (gce *GCECloud) encodeDiskTags(tags map[string]string) (string, error) { // CreateDisk creates a new Persistent Disk, with the specified name & size, in // the specified zone. It stores specified tags endoced in JSON in Description // field. 
-func (gce *GCECloud) CreateDisk(name string, zone string, sizeGb int64, tags map[string]string) error { +func (gce *GCECloud) CreateDisk(name string, diskType string, zone string, sizeGb int64, tags map[string]string) error { tagsStr, err := gce.encodeDiskTags(tags) if err != nil { return err } + switch diskType { + case DiskTypeSSD, DiskTypeStandard: + // noop + case "": + diskType = diskTypeDefault + default: + return fmt.Errorf("invalid GCE disk type %q", diskType) + } + diskTypeUri := fmt.Sprintf(diskTypeUriTemplate, gce.projectID, zone, diskType) + diskToCreate := &compute.Disk{ Name: name, SizeGb: sizeGb, Description: tagsStr, + Type: diskTypeUri, } createOp, err := gce.service.Disks.Insert(gce.projectID, zone, diskToCreate).Do() diff --git a/pkg/volume/gce_pd/attacher_test.go b/pkg/volume/gce_pd/attacher_test.go index fc3283f68a1a3..328fbf0581ac7 100644 --- a/pkg/volume/gce_pd/attacher_test.go +++ b/pkg/volume/gce_pd/attacher_test.go @@ -351,7 +351,7 @@ func (testcase *testcase) DiskIsAttached(diskName, instanceID string) (bool, err return expected.isAttached, expected.ret } -func (testcase *testcase) CreateDisk(name string, zone string, sizeGb int64, tags map[string]string) error { +func (testcase *testcase) CreateDisk(name string, diskType string, zone string, sizeGb int64, tags map[string]string) error { return errors.New("Not implemented") } diff --git a/pkg/volume/gce_pd/gce_util.go b/pkg/volume/gce_pd/gce_util.go index 4b0a58c41b19f..9a1677bcd8a1d 100644 --- a/pkg/volume/gce_pd/gce_util.go +++ b/pkg/volume/gce_pd/gce_util.go @@ -80,17 +80,38 @@ func (gceutil *GCEDiskUtil) CreateVolume(c *gcePersistentDiskProvisioner) (strin // GCE works with gigabytes, convert to GiB with rounding up requestGB := volume.RoundUpSize(requestBytes, 1024*1024*1024) - // The disk will be created in the zone in which this code is currently running - // TODO: We should support auto-provisioning volumes in multiple/specified zones - zones, err := cloud.GetAllZones() - if err 
!= nil { - glog.V(2).Infof("error getting zone information from GCE: %v", err) - return "", 0, nil, err + // Apply ProvisionerParameters (case-insensitive). We leave validation of + // the values to the cloud provider. + diskType := "" + zone := "" + for k, v := range c.options.ProvisionerParameters { + switch strings.ToLower(k) { + case "type": + diskType = v + case "zone": + zone = v + default: + return "", 0, nil, fmt.Errorf("invalid option %q for volume plugin %s", k, c.plugin.GetPluginName()) + } } - zone := volume.ChooseZoneForVolume(zones, c.options.PVCName) + // TODO: implement c.options.ProvisionerSelector parsing + if c.options.ProvisionerSelector != nil { + return "", 0, nil, fmt.Errorf("claim.Spec.Selector is not supported for dynamic provisioning on GCE") + } + + if zone == "" { + // No zone specified, choose one randomly in the same region as the + // node is running. + zones, err := cloud.GetAllZones() + if err != nil { + glog.V(2).Infof("error getting zone information from GCE: %v", err) + return "", 0, nil, err + } + zone = volume.ChooseZoneForVolume(zones, c.options.PVCName) + } - err = cloud.CreateDisk(name, zone, int64(requestGB), *c.options.CloudTags) + err = cloud.CreateDisk(name, diskType, zone, int64(requestGB), *c.options.CloudTags) if err != nil { glog.V(2).Infof("Error creating GCE PD volume: %v", err) return "", 0, nil, err diff --git a/test/e2e/pd.go b/test/e2e/pd.go index 7bb95246d4aa0..19f40c6057175 100644 --- a/test/e2e/pd.go +++ b/test/e2e/pd.go @@ -469,7 +469,7 @@ func createPD() (string, error) { } tags := map[string]string{} - err = gceCloud.CreateDisk(pdName, framework.TestContext.CloudConfig.Zone, 10 /* sizeGb */, tags) + err = gceCloud.CreateDisk(pdName, gcecloud.DiskTypeSSD, framework.TestContext.CloudConfig.Zone, 10 /* sizeGb */, tags) if err != nil { return "", err } From 1b5038fe073070f312c37bbc4cb8ef970e0f3374 Mon Sep 17 00:00:00 2001 From: Jan Safranek Date: Fri, 15 Jul 2016 14:56:09 +0200 Subject: [PATCH 08/11] Rework 
volume plugin probing. We do not distinguish provisioner and recycler/deleter plugins, they can be probed in the same method. We need to handle --enable-hostpath-provisioning in the plugin now. --- .../app/controllermanager.go | 2 +- cmd/kube-controller-manager/app/plugins.go | 32 ++++++------------- .../controllermanager/controllermanager.go | 7 +--- pkg/volume/host_path/host_path.go | 14 ++------ pkg/volume/host_path/host_path_test.go | 3 +- pkg/volume/plugins.go | 4 +++ 6 files changed, 21 insertions(+), 41 deletions(-) diff --git a/cmd/kube-controller-manager/app/controllermanager.go b/cmd/kube-controller-manager/app/controllermanager.go index 5b5a968f9296f..261a741e3842d 100644 --- a/cmd/kube-controller-manager/app/controllermanager.go +++ b/cmd/kube-controller-manager/app/controllermanager.go @@ -393,7 +393,7 @@ func StartControllers(s *options.CMServer, kubeClient *client.Client, kubeconfig volumeController := persistentvolumecontroller.NewPersistentVolumeController( clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "persistent-volume-binder")), s.PVClaimBinderSyncPeriod.Duration, - ProbeRecyclableVolumePlugins(s.VolumeConfiguration), + ProbeControllerVolumePlugins(cloud, s.VolumeConfiguration), cloud, s.ClusterName, nil, nil, nil, nil, diff --git a/cmd/kube-controller-manager/app/plugins.go b/cmd/kube-controller-manager/app/plugins.go index 8ea23ecca2ac6..af13c470de295 100644 --- a/cmd/kube-controller-manager/app/plugins.go +++ b/cmd/kube-controller-manager/app/plugins.go @@ -62,8 +62,9 @@ func ProbeAttachableVolumePlugins(config componentconfig.VolumeConfiguration) [] return allPlugins } -// ProbeRecyclableVolumePlugins collects all persistent volume plugins into an easy to use list. -func ProbeRecyclableVolumePlugins(config componentconfig.VolumeConfiguration) []volume.VolumePlugin { +// ProbeControllerVolumePlugins collects all persistent volume plugins into an easy to use list. 
+// Only provisioner/recycler/deleter volume plugins should be returned. +func ProbeControllerVolumePlugins(cloud cloudprovider.Interface, config componentconfig.VolumeConfiguration) []volume.VolumePlugin { allPlugins := []volume.VolumePlugin{} // The list of plugins to probe is decided by this binary, not @@ -79,6 +80,7 @@ func ProbeRecyclableVolumePlugins(config componentconfig.VolumeConfiguration) [] RecyclerMinimumTimeout: int(config.PersistentVolumeRecyclerConfiguration.MinimumTimeoutHostPath), RecyclerTimeoutIncrement: int(config.PersistentVolumeRecyclerConfiguration.IncrementTimeoutHostPath), RecyclerPodTemplate: volume.NewPersistentVolumeRecyclerPodTemplate(), + ProvisioningEnabled: config.EnableHostPathProvisioning, } if err := AttemptToLoadRecycler(config.PersistentVolumeRecyclerConfiguration.PodTemplateFilePathHostPath, &hostPathConfig); err != nil { glog.Fatalf("Could not create hostpath recycler pod from file %s: %+v", config.PersistentVolumeRecyclerConfiguration.PodTemplateFilePathHostPath, err) @@ -95,32 +97,18 @@ func ProbeRecyclableVolumePlugins(config componentconfig.VolumeConfiguration) [] } allPlugins = append(allPlugins, nfs.ProbeVolumePlugins(nfsConfig)...) - allPlugins = append(allPlugins, aws_ebs.ProbeVolumePlugins()...) - allPlugins = append(allPlugins, gce_pd.ProbeVolumePlugins()...) - allPlugins = append(allPlugins, cinder.ProbeVolumePlugins()...) - allPlugins = append(allPlugins, vsphere_volume.ProbeVolumePlugins()...) - - return allPlugins -} - -// NewVolumeProvisioner returns a volume provisioner to use when running in a cloud or development environment. -// The beta implementation of provisioning allows 1 implied provisioner per cloud, until we allow configuration of many. -// We explicitly map clouds to volume plugins here which allows us to configure many later without backwards compatibility issues. 
-// Not all cloudproviders have provisioning capability, which is the reason for the bool in the return to tell the caller to expect one or not. -func NewVolumeProvisioner(cloud cloudprovider.Interface, config componentconfig.VolumeConfiguration) (volume.ProvisionableVolumePlugin, error) { switch { - case cloud == nil && config.EnableHostPathProvisioning: - return getProvisionablePluginFromVolumePlugins(host_path.ProbeVolumePlugins(volume.VolumeConfig{})) case cloud != nil && aws.ProviderName == cloud.ProviderName(): - return getProvisionablePluginFromVolumePlugins(aws_ebs.ProbeVolumePlugins()) + allPlugins = append(allPlugins, aws_ebs.ProbeVolumePlugins()...) case cloud != nil && gce.ProviderName == cloud.ProviderName(): - return getProvisionablePluginFromVolumePlugins(gce_pd.ProbeVolumePlugins()) + allPlugins = append(allPlugins, gce_pd.ProbeVolumePlugins()...) case cloud != nil && openstack.ProviderName == cloud.ProviderName(): - return getProvisionablePluginFromVolumePlugins(cinder.ProbeVolumePlugins()) + allPlugins = append(allPlugins, cinder.ProbeVolumePlugins()...) case cloud != nil && vsphere.ProviderName == cloud.ProviderName(): - return getProvisionablePluginFromVolumePlugins(vsphere_volume.ProbeVolumePlugins()) + allPlugins = append(allPlugins, vsphere_volume.ProbeVolumePlugins()...) 
} - return nil, nil + + return allPlugins } func getProvisionablePluginFromVolumePlugins(plugins []volume.VolumePlugin) (volume.ProvisionableVolumePlugin, error) { diff --git a/contrib/mesos/pkg/controllermanager/controllermanager.go b/contrib/mesos/pkg/controllermanager/controllermanager.go index 2a1264bfeecb2..a0c46c35aac8c 100644 --- a/contrib/mesos/pkg/controllermanager/controllermanager.go +++ b/contrib/mesos/pkg/controllermanager/controllermanager.go @@ -273,15 +273,10 @@ func (s *CMServer) Run(_ []string) error { } } - provisioner, err := kubecontrollermanager.NewVolumeProvisioner(cloud, s.VolumeConfiguration) - if err != nil { - glog.Fatal("A Provisioner could not be created, but one was expected. Provisioning will not work. This functionality is considered an early Alpha version.") - } - volumeController := persistentvolumecontroller.NewPersistentVolumeController( clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "persistent-volume-binder")), s.PVClaimBinderSyncPeriod.Duration, - kubecontrollermanager.ProbeRecyclableVolumePlugins(s.VolumeConfiguration), + kubecontrollermanager.ProbeControllerVolumePlugins(cloud, s.VolumeConfiguration), cloud, s.ClusterName, nil, diff --git a/pkg/volume/host_path/host_path.go b/pkg/volume/host_path/host_path.go index 93d20d62e4f44..747ef1ba1b8cb 100644 --- a/pkg/volume/host_path/host_path.go +++ b/pkg/volume/host_path/host_path.go @@ -43,17 +43,6 @@ func ProbeVolumePlugins(volumeConfig volume.VolumeConfig) []volume.VolumePlugin } } -func ProbeRecyclableVolumePlugins(recyclerFunc func(pvName string, spec *volume.Spec, host volume.VolumeHost, volumeConfig volume.VolumeConfig) (volume.Recycler, error), volumeConfig volume.VolumeConfig) []volume.VolumePlugin { - return []volume.VolumePlugin{ - &hostPathPlugin{ - host: nil, - newRecyclerFunc: recyclerFunc, - newProvisionerFunc: newProvisioner, - config: volumeConfig, - }, - } -} - type hostPathPlugin struct { host volume.VolumeHost // decouple creating 
Recyclers/Deleters/Provisioners by deferring to a function. Allows for easier testing. @@ -132,6 +121,9 @@ func (plugin *hostPathPlugin) NewDeleter(spec *volume.Spec) (volume.Deleter, err } func (plugin *hostPathPlugin) NewProvisioner(options volume.VolumeOptions) (volume.Provisioner, error) { + if !plugin.config.ProvisioningEnabled { + return nil, fmt.Errorf("Provisioning in volume plugin %q is disabled", plugin.GetPluginName()) + } if len(options.AccessModes) == 0 { options.AccessModes = plugin.GetAccessModes() } diff --git a/pkg/volume/host_path/host_path_test.go b/pkg/volume/host_path/host_path_test.go index 3e6b40945adae..97c8e477e54c6 100644 --- a/pkg/volume/host_path/host_path_test.go +++ b/pkg/volume/host_path/host_path_test.go @@ -153,7 +153,8 @@ func TestProvisioner(t *testing.T) { err := os.MkdirAll(tempPath, 0750) plugMgr := volume.VolumePluginMgr{} - plugMgr.InitPlugins(ProbeVolumePlugins(volume.VolumeConfig{}), volumetest.NewFakeVolumeHost("/tmp/fake", nil, nil, "" /* rootContext */)) + plugMgr.InitPlugins(ProbeVolumePlugins(volume.VolumeConfig{ProvisioningEnabled: true}), + volumetest.NewFakeVolumeHost("/tmp/fake", nil, nil, "" /* rootContext */)) spec := &volume.Spec{PersistentVolume: &api.PersistentVolume{Spec: api.PersistentVolumeSpec{PersistentVolumeSource: api.PersistentVolumeSource{HostPath: &api.HostPathVolumeSource{Path: tempPath}}}}} plug, err := plugMgr.FindCreatablePluginBySpec(spec) if err != nil { diff --git a/pkg/volume/plugins.go b/pkg/volume/plugins.go index a9552571fad42..691a36a23c6af 100644 --- a/pkg/volume/plugins.go +++ b/pkg/volume/plugins.go @@ -288,6 +288,10 @@ type VolumeConfig struct { // the system and only understood by the binary hosting the plugin and the // plugin itself. OtherAttributes map[string]string + + // ProvisioningEnabled configures whether provisioning of this plugin is + // enabled or not. Currently used only in host_path plugin. 
+ ProvisioningEnabled bool } // NewSpecFromVolume creates an Spec from an api.Volume From ef97bfe8403d6c7457fcc86108412854bb90434b Mon Sep 17 00:00:00 2001 From: Jan Safranek Date: Fri, 15 Jul 2016 14:56:10 +0200 Subject: [PATCH 09/11] Update integration test --- .../persistent_volumes_test.go | 20 ++++++++++++++++--- 1 file changed, 17 insertions(+), 3 deletions(-) diff --git a/test/integration/persistentvolumes/persistent_volumes_test.go b/test/integration/persistentvolumes/persistent_volumes_test.go index 62ff9b6e91a20..dad9173b5df5d 100644 --- a/test/integration/persistentvolumes/persistent_volumes_test.go +++ b/test/integration/persistentvolumes/persistent_volumes_test.go @@ -62,6 +62,8 @@ func init() { const defaultObjectCount = 100 const defaultSyncPeriod = 10 * time.Second +const provisionerPluginName = "kubernetes.io/mock-provisioner" + func getObjectCount() int { objectCount := defaultObjectCount if s := os.Getenv("KUBE_INTEGRATION_PV_OBJECTS"); s != "" { @@ -648,8 +650,20 @@ func TestPersistentVolumeProvisionMultiPVCs(t *testing.T) { defer watchPVC.Stop() // NOTE: This test cannot run in parallel, because it is creating and deleting - // non-namespaced objects (PersistenceVolumes). + // non-namespaced objects (PersistenceVolumes and StorageClasses). 
defer testClient.Core().PersistentVolumes().DeleteCollection(nil, api.ListOptions{}) + defer testClient.Extensions().StorageClasses().DeleteCollection(nil, api.ListOptions{}) + + storageClass := extensions.StorageClass{ + TypeMeta: unversioned.TypeMeta{ + Kind: "StorageClass", + }, + ObjectMeta: api.ObjectMeta{ + Name: "gold", + }, + ProvisionerType: provisionerPluginName, + } + testClient.Extensions().StorageClasses().Create(&storageClass) binder.Run() defer binder.Stop() @@ -659,7 +673,7 @@ func TestPersistentVolumeProvisionMultiPVCs(t *testing.T) { for i := 0; i < objCount; i++ { pvc := createPVC("pvc-provision-"+strconv.Itoa(i), ns.Name, "1G", []api.PersistentVolumeAccessMode{api.ReadWriteOnce}) pvc.Annotations = map[string]string{ - "volume.alpha.kubernetes.io/storage-class": "", + "volume.alpha.kubernetes.io/storage-class": "gold", } pvcs[i] = pvc } @@ -885,7 +899,7 @@ func createClients(ns *api.Namespace, t *testing.T, s *httptest.Server) (*client host := volumetest.NewFakeVolumeHost("/tmp/fake", nil, nil, "" /* rootContext */) plugin := &volumetest.FakeVolumePlugin{ - PluginName: "plugin-name", + PluginName: provisionerPluginName, Host: host, Config: volume.VolumeConfig{}, LastProvisionerOptions: volume.VolumeOptions{}, From 4a9c3749cdec0c31ae6c280f7d54da8a733c9c97 Mon Sep 17 00:00:00 2001 From: Jan Safranek Date: Fri, 15 Jul 2016 14:56:46 +0200 Subject: [PATCH 10/11] Update e2e test --- test/e2e/volume_provisioning.go | 31 ++++++++++++++++++++++++++++++- 1 file changed, 30 insertions(+), 1 deletion(-) diff --git a/test/e2e/volume_provisioning.go b/test/e2e/volume_provisioning.go index 0def8e8b0665e..b7f0be9ce97f6 100644 --- a/test/e2e/volume_provisioning.go +++ b/test/e2e/volume_provisioning.go @@ -22,6 +22,7 @@ import ( "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/resource" "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/apis/extensions" client "k8s.io/kubernetes/pkg/client/unversioned" "k8s.io/kubernetes/test/e2e/framework" @@ 
-52,6 +53,11 @@ var _ = framework.KubeDescribe("Dynamic provisioning", func() { framework.KubeDescribe("DynamicProvisioner", func() { It("should create and delete persistent volumes", func() { framework.SkipUnlessProviderIs("openstack", "gce", "aws", "gke") + + By("creating a StorageClass") + class := createStorageClass() + defer c.Extensions().StorageClasses().Delete(class.Name) + By("creating a claim with a dynamic provisioning annotation") claim := createClaim(ns) defer func() { @@ -130,7 +136,7 @@ func createClaim(ns string) *api.PersistentVolumeClaim { GenerateName: "pvc-", Namespace: ns, Annotations: map[string]string{ - "volume.alpha.kubernetes.io/storage-class": "", + "volume.alpha.kubernetes.io/storage-class": "gold", }, }, Spec: api.PersistentVolumeClaimSpec{ @@ -192,3 +198,26 @@ func runInPodWithVolume(c *client.Client, ns, claimName, command string) { framework.ExpectNoError(err, "Failed to create pod: %v", err) framework.ExpectNoError(framework.WaitForPodSuccessInNamespaceSlow(c, pod.Name, pod.Spec.Containers[0].Name, pod.Namespace)) } + +func createStorageClass() *extensions.StorageClass { + var pluginName string + + switch { + case framework.ProviderIs("gke"), framework.ProviderIs("gce"): + pluginName = "kubernetes.io/gce-pd" + case framework.ProviderIs("aws"): + pluginName = "kubernetes.io/aws-ebs" + case framework.ProviderIs("openstack"): + pluginName = "kubernetes.io/cinder" + } + + return &extensions.StorageClass{ + TypeMeta: unversioned.TypeMeta{ + Kind: "StorageClass", + }, + ObjectMeta: api.ObjectMeta{ + Name: "gold", + }, + ProvisionerType: pluginName, + } +} From d8b91816bd6323c380243147915b00ab185b67da Mon Sep 17 00:00:00 2001 From: Huamin Chen Date: Wed, 20 Jul 2016 09:54:30 -0400 Subject: [PATCH 11/11] create cinder volumes with VolumeType Signed-off-by: Huamin Chen --- .../v1beta1/zz_generated.conversion.go | 88 +++++++++++++++ .../v1beta1/zz_generated.deepcopy.go | 45 ++++++++++ 
pkg/apis/extensions/zz_generated.deepcopy.go | 45 ++++++++++ .../providers/openstack/openstack.go | 6 +- .../providers/rackspace/rackspace.go | 2 +- pkg/volume/cinder/attacher_test.go | 2 +- pkg/volume/cinder/cinder.go | 2 +- pkg/volume/cinder/cinder_util.go | 18 +++- 8 files changed, 203 insertions(+), 5 deletions(-) diff --git a/pkg/apis/extensions/v1beta1/zz_generated.conversion.go b/pkg/apis/extensions/v1beta1/zz_generated.conversion.go index 38f3157786669..e0450c937e017 100644 --- a/pkg/apis/extensions/v1beta1/zz_generated.conversion.go +++ b/pkg/apis/extensions/v1beta1/zz_generated.conversion.go @@ -156,6 +156,10 @@ func init() { Convert_extensions_ScaleSpec_To_v1beta1_ScaleSpec, Convert_v1beta1_ScaleStatus_To_extensions_ScaleStatus, Convert_extensions_ScaleStatus_To_v1beta1_ScaleStatus, + Convert_v1beta1_StorageClass_To_extensions_StorageClass, + Convert_extensions_StorageClass_To_v1beta1_StorageClass, + Convert_v1beta1_StorageClassList_To_extensions_StorageClassList, + Convert_extensions_StorageClassList_To_v1beta1_StorageClassList, Convert_v1beta1_SupplementalGroupsStrategyOptions_To_extensions_SupplementalGroupsStrategyOptions, Convert_extensions_SupplementalGroupsStrategyOptions_To_v1beta1_SupplementalGroupsStrategyOptions, Convert_v1beta1_ThirdPartyResource_To_extensions_ThirdPartyResource, @@ -2414,6 +2418,90 @@ func Convert_extensions_ScaleSpec_To_v1beta1_ScaleSpec(in *extensions.ScaleSpec, return autoConvert_extensions_ScaleSpec_To_v1beta1_ScaleSpec(in, out, s) } +func autoConvert_v1beta1_StorageClass_To_extensions_StorageClass(in *StorageClass, out *extensions.StorageClass, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + // TODO: Inefficient conversion - can we improve it? 
+ if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { + return err + } + out.ProvisionerType = in.ProvisionerType + out.ProvisionerParameters = in.ProvisionerParameters + return nil +} + +func Convert_v1beta1_StorageClass_To_extensions_StorageClass(in *StorageClass, out *extensions.StorageClass, s conversion.Scope) error { + return autoConvert_v1beta1_StorageClass_To_extensions_StorageClass(in, out, s) +} + +func autoConvert_extensions_StorageClass_To_v1beta1_StorageClass(in *extensions.StorageClass, out *StorageClass, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + // TODO: Inefficient conversion - can we improve it? + if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { + return err + } + out.ProvisionerType = in.ProvisionerType + out.ProvisionerParameters = in.ProvisionerParameters + return nil +} + +func Convert_extensions_StorageClass_To_v1beta1_StorageClass(in *extensions.StorageClass, out *StorageClass, s conversion.Scope) error { + return autoConvert_extensions_StorageClass_To_v1beta1_StorageClass(in, out, s) +} + +func autoConvert_v1beta1_StorageClassList_To_extensions_StorageClassList(in *StorageClassList, out *extensions.StorageClassList, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { + return err + } + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]extensions.StorageClass, len(*in)) + for i := range *in { + if err := Convert_v1beta1_StorageClass_To_extensions_StorageClass(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +func Convert_v1beta1_StorageClassList_To_extensions_StorageClassList(in 
*StorageClassList, out *extensions.StorageClassList, s conversion.Scope) error { + return autoConvert_v1beta1_StorageClassList_To_extensions_StorageClassList(in, out, s) +} + +func autoConvert_extensions_StorageClassList_To_v1beta1_StorageClassList(in *extensions.StorageClassList, out *StorageClassList, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { + return err + } + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]StorageClass, len(*in)) + for i := range *in { + if err := Convert_extensions_StorageClass_To_v1beta1_StorageClass(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +func Convert_extensions_StorageClassList_To_v1beta1_StorageClassList(in *extensions.StorageClassList, out *StorageClassList, s conversion.Scope) error { + return autoConvert_extensions_StorageClassList_To_v1beta1_StorageClassList(in, out, s) +} + func autoConvert_v1beta1_SupplementalGroupsStrategyOptions_To_extensions_SupplementalGroupsStrategyOptions(in *SupplementalGroupsStrategyOptions, out *extensions.SupplementalGroupsStrategyOptions, s conversion.Scope) error { out.Rule = extensions.SupplementalGroupsStrategyType(in.Rule) if in.Ranges != nil { diff --git a/pkg/apis/extensions/v1beta1/zz_generated.deepcopy.go b/pkg/apis/extensions/v1beta1/zz_generated.deepcopy.go index bcd1e1c7c3650..ee8ac3cd37500 100644 --- a/pkg/apis/extensions/v1beta1/zz_generated.deepcopy.go +++ b/pkg/apis/extensions/v1beta1/zz_generated.deepcopy.go @@ -94,6 +94,8 @@ func init() { conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_Scale, InType: reflect.TypeOf(func() *Scale { var x *Scale; return x }())}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_ScaleSpec, InType: reflect.TypeOf(func() 
*ScaleSpec { var x *ScaleSpec; return x }())}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_ScaleStatus, InType: reflect.TypeOf(func() *ScaleStatus { var x *ScaleStatus; return x }())}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_StorageClass, InType: reflect.TypeOf(func() *StorageClass { var x *StorageClass; return x }())}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_StorageClassList, InType: reflect.TypeOf(func() *StorageClassList { var x *StorageClassList; return x }())}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_SubresourceReference, InType: reflect.TypeOf(func() *SubresourceReference { var x *SubresourceReference; return x }())}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_SupplementalGroupsStrategyOptions, InType: reflect.TypeOf(func() *SupplementalGroupsStrategyOptions { var x *SupplementalGroupsStrategyOptions; return x }())}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_ThirdPartyResource, InType: reflect.TypeOf(func() *ThirdPartyResource { var x *ThirdPartyResource; return x }())}, @@ -1333,6 +1335,49 @@ func DeepCopy_v1beta1_ScaleStatus(in interface{}, out interface{}, c *conversion } } +func DeepCopy_v1beta1_StorageClass(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*StorageClass) + out := out.(*StorageClass) + out.TypeMeta = in.TypeMeta + if err := v1.DeepCopy_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, c); err != nil { + return err + } + out.ProvisionerType = in.ProvisionerType + if in.ProvisionerParameters != nil { + in, out := &in.ProvisionerParameters, &out.ProvisionerParameters + *out = make(map[string]string) + for key, val := range *in { + (*out)[key] = val + } + } else { + out.ProvisionerParameters = nil + } + return nil + } +} + +func DeepCopy_v1beta1_StorageClassList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*StorageClassList) + out := out.(*StorageClassList) + out.TypeMeta = in.TypeMeta + out.ListMeta = 
in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]StorageClass, len(*in)) + for i := range *in { + if err := DeepCopy_v1beta1_StorageClass(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil + } +} + func DeepCopy_v1beta1_SubresourceReference(in interface{}, out interface{}, c *conversion.Cloner) error { { in := in.(*SubresourceReference) diff --git a/pkg/apis/extensions/zz_generated.deepcopy.go b/pkg/apis/extensions/zz_generated.deepcopy.go index 8cd3fc64224b2..19e2e9eaf8ac6 100644 --- a/pkg/apis/extensions/zz_generated.deepcopy.go +++ b/pkg/apis/extensions/zz_generated.deepcopy.go @@ -79,6 +79,8 @@ func init() { conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_Scale, InType: reflect.TypeOf(func() *Scale { var x *Scale; return x }())}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_ScaleSpec, InType: reflect.TypeOf(func() *ScaleSpec { var x *ScaleSpec; return x }())}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_ScaleStatus, InType: reflect.TypeOf(func() *ScaleStatus { var x *ScaleStatus; return x }())}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_StorageClass, InType: reflect.TypeOf(func() *StorageClass { var x *StorageClass; return x }())}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_StorageClassList, InType: reflect.TypeOf(func() *StorageClassList { var x *StorageClassList; return x }())}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_SupplementalGroupsStrategyOptions, InType: reflect.TypeOf(func() *SupplementalGroupsStrategyOptions { var x *SupplementalGroupsStrategyOptions; return x }())}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_ThirdPartyResource, InType: reflect.TypeOf(func() *ThirdPartyResource { var x *ThirdPartyResource; return x }())}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_ThirdPartyResourceData, InType: reflect.TypeOf(func() *ThirdPartyResourceData { var x 
*ThirdPartyResourceData; return x }())}, @@ -974,6 +976,49 @@ func DeepCopy_extensions_ScaleStatus(in interface{}, out interface{}, c *convers } } +func DeepCopy_extensions_StorageClass(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*StorageClass) + out := out.(*StorageClass) + out.TypeMeta = in.TypeMeta + if err := api.DeepCopy_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, c); err != nil { + return err + } + out.ProvisionerType = in.ProvisionerType + if in.ProvisionerParameters != nil { + in, out := &in.ProvisionerParameters, &out.ProvisionerParameters + *out = make(map[string]string) + for key, val := range *in { + (*out)[key] = val + } + } else { + out.ProvisionerParameters = nil + } + return nil + } +} + +func DeepCopy_extensions_StorageClassList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*StorageClassList) + out := out.(*StorageClassList) + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]StorageClass, len(*in)) + for i := range *in { + if err := DeepCopy_extensions_StorageClass(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil + } +} + func DeepCopy_extensions_SupplementalGroupsStrategyOptions(in interface{}, out interface{}, c *conversion.Cloner) error { { in := in.(*SupplementalGroupsStrategyOptions) diff --git a/pkg/cloudprovider/providers/openstack/openstack.go b/pkg/cloudprovider/providers/openstack/openstack.go index be941b52cf7a1..83d6742d5b3de 100644 --- a/pkg/cloudprovider/providers/openstack/openstack.go +++ b/pkg/cloudprovider/providers/openstack/openstack.go @@ -660,7 +660,7 @@ func (os *OpenStack) getVolume(diskName string) (volumes.Volume, error) { } // Create a volume of given size (in GiB) -func (os *OpenStack) CreateVolume(name string, size int, tags *map[string]string) (volumeName string, err error) { +func (os *OpenStack) 
CreateVolume(name string, size int, opt *map[string]string, tags *map[string]string) (volumeName string, err error) { sClient, err := openstack.NewBlockStorageV1(os.provider, gophercloud.EndpointOpts{ Region: os.region, @@ -675,6 +675,10 @@ func (os *OpenStack) CreateVolume(name string, size int, tags *map[string]string Name: name, Size: size, } + if opt != nil { + opts.VolumeType = (*opt)["VolumeType"] + opts.Availability = (*opt)["Availability"] + } if tags != nil { opts.Metadata = *tags } diff --git a/pkg/cloudprovider/providers/rackspace/rackspace.go b/pkg/cloudprovider/providers/rackspace/rackspace.go index d036096624502..72c63f1f32243 100644 --- a/pkg/cloudprovider/providers/rackspace/rackspace.go +++ b/pkg/cloudprovider/providers/rackspace/rackspace.go @@ -474,7 +474,7 @@ func (os *Rackspace) GetZone() (cloudprovider.Zone, error) { } // Create a volume of given size (in GiB) -func (rs *Rackspace) CreateVolume(name string, size int, tags *map[string]string) (volumeName string, err error) { +func (rs *Rackspace) CreateVolume(name string, size int, opt *map[string]string, tags *map[string]string) (volumeName string, err error) { return "", errors.New("unimplemented") } diff --git a/pkg/volume/cinder/attacher_test.go b/pkg/volume/cinder/attacher_test.go index dbc19273a82d8..5501b92f8eb83 100644 --- a/pkg/volume/cinder/attacher_test.go +++ b/pkg/volume/cinder/attacher_test.go @@ -391,7 +391,7 @@ func (testcase *testcase) GetAttachmentDiskPath(instanceID string, diskName stri return expected.retPath, expected.ret } -func (testcase *testcase) CreateVolume(name string, size int, tags *map[string]string) (volumeName string, err error) { +func (testcase *testcase) CreateVolume(name string, size int, opt *map[string]string, tags *map[string]string) (volumeName string, err error) { return "", errors.New("Not implemented") } diff --git a/pkg/volume/cinder/cinder.go b/pkg/volume/cinder/cinder.go index 9d314bc13e6e0..315609142bbb3 100644 --- a/pkg/volume/cinder/cinder.go 
+++ b/pkg/volume/cinder/cinder.go @@ -45,7 +45,7 @@ type CinderProvider interface { AttachDisk(instanceID string, diskName string) (string, error) DetachDisk(instanceID string, partialDiskId string) error DeleteVolume(volumeName string) error - CreateVolume(name string, size int, tags *map[string]string) (volumeName string, err error) + CreateVolume(name string, size int, opt *map[string]string, tags *map[string]string) (volumeName string, err error) GetDevicePath(diskId string) string InstanceID() (string, error) GetAttachmentDiskPath(instanceID string, diskName string) (string, error) diff --git a/pkg/volume/cinder/cinder_util.go b/pkg/volume/cinder/cinder_util.go index afd22d2cb3339..00dda2418ad36 100644 --- a/pkg/volume/cinder/cinder_util.go +++ b/pkg/volume/cinder/cinder_util.go @@ -18,7 +18,9 @@ package cinder import ( "errors" + "fmt" "os" + "strings" "time" "github.com/golang/glog" @@ -139,7 +141,21 @@ func (util *CinderDiskUtil) CreateVolume(c *cinderVolumeProvisioner) (volumeID s // Cinder works with gigabytes, convert to GiB with rounding up volSizeGB := int(volume.RoundUpSize(volSizeBytes, 1024*1024*1024)) name := volume.GenerateVolumeName(c.options.ClusterName, c.options.PVName, 255) // Cinder volume name can have up to 255 characters - name, err = cloud.CreateVolume(name, volSizeGB, c.options.CloudTags) + opt := make(map[string]string) + // Apply ProvisionerParameters (case-insensitive). We leave validation of + // the values to the cloud provider. + for k, v := range c.options.ProvisionerParameters { + switch strings.ToLower(k) { + case "type": + opt["VolumeType"] = v + case "zone": + opt["Availability"] = v + default: + return "", 0, fmt.Errorf("invalid option %q for volume plugin %s", k, c.plugin.GetPluginName()) + } + } + + name, err = cloud.CreateVolume(name, volSizeGB, &opt, c.options.CloudTags) if err != nil { glog.V(2).Infof("Error creating cinder volume: %v", err) return "", 0, err