import some integration tests from kubeval

Yann Hamon 2020-06-04 00:33:01 +02:00
parent 6a7affc438
commit 3751a26ae7
31 changed files with 1368 additions and 5 deletions

@@ -4,4 +4,38 @@
  run bin/kubeconform -file fixtures/valid.yaml -summary
  [ "$status" -eq 0 ]
  [ "$output" = "Run summary - Valid: 1, Invalid: 0, Errors: 0 Skipped: 0" ]
}

@test "Pass when parsing a Kubernetes file with string and integer quantities" {
  run bin/kubeconform -verbose -file fixtures/quantity.yaml
  [ "$status" -eq 0 ]
  [ "$output" = "fixtures/quantity.yaml - LimitRange is valid" ]
}

@test "Pass when parsing a valid Kubernetes config file with null arrays" {
  run bin/kubeconform -verbose -file fixtures/null_string.yaml
  [ "$status" -eq 0 ]
  [ "$output" = "fixtures/null_string.yaml - Service is valid" ]
}

@test "Pass when parsing a multi-document config file" {
  run bin/kubeconform -summary -file fixtures/multi_valid.yaml
  [ "$status" -eq 0 ]
  [ "$output" = "Run summary - Valid: 6, Invalid: 0, Errors: 0 Skipped: 0" ]
}

@test "Fail when parsing a multi-document config file with one invalid resource" {
  run bin/kubeconform -file fixtures/multi_invalid.yaml
  [ "$status" -eq 1 ]
}

@test "Fail when parsing an invalid Kubernetes config file" {
  run bin/kubeconform -file fixtures/invalid.yaml
  [ "$status" -eq 1 ]
}

@test "Return relevant error for non-existent file" {
  run bin/kubeconform -file fixtures/not-here
  [ "$status" -eq 1 ]
  [ $(expr "$output" : "^failed opening fixtures/not-here") -ne 0 ]
}
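
These tests use the Bats framework and invoke a kubeconform binary at bin/kubeconform. A minimal sketch of running the suite locally, assuming bats-core is installed; the build command below is an assumption, not something this commit specifies:

    # build the binary at the path the tests expect (assumes a main package at the repo root)
    go build -o bin/kubeconform .
    # run every .bats suite in the current directory
    bats .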

fixtures/blank.yaml Executable file (empty)

fixtures/comment.yaml Executable file
@@ -0,0 +1,3 @@
---
# Arbitrary comment

@@ -0,0 +1,39 @@
# Two objects with same name in same namespace, resource of non-namespaced kind
apiVersion: v1
kind: PersistentVolume
metadata:
  name: pv0003
spec:
  capacity:
    storage: 5Gi
  volumeMode: Filesystem
  accessModes:
  - ReadWriteOnce
  persistentVolumeReclaimPolicy: Recycle
  storageClassName: slow
  mountOptions:
  - hard
  - nfsvers=4.1
  nfs:
    path: /tmp
    server: 172.17.0.2
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: pv0003
spec:
  capacity:
    storage: 5Gi
  volumeMode: Filesystem
  accessModes:
  - ReadWriteOnce
  persistentVolumeReclaimPolicy: Recycle
  storageClassName: slow
  mountOptions:
  - hard
  - nfsvers=4.1
  nfs:
    path: /tmp
    server: 172.17.0.2

@@ -0,0 +1,41 @@
# Two objects with same name in same namespace, but of a kind configured to be skipped
apiVersion: v1
kind: SkipThisKind
metadata:
  name: "identical"
spec:
  replicas: 2
  selector:
    app: nginx
  template:
    metadata:
      name: nginx
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: nginx
        ports:
        - containerPort: 80
---
apiVersion: v1
kind: SkipThisKind
metadata:
  name: "identical"
spec:
  replicas: 2
  selector:
    app: nginx
  template:
    metadata:
      name: nginx
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: nginx
        ports:
        - containerPort: 80

@@ -0,0 +1,43 @@
# Two objects with same name in same namespace (one of them not given, i.e. will use default namespace as passed to kubeval)
apiVersion: v1
kind: ReplicationController
metadata:
  name: "bob"
  namespace: the-default-namespace
spec:
  replicas: 2
  selector:
    app: nginx
  template:
    metadata:
      name: nginx
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: nginx
        ports:
        - containerPort: 80
---
apiVersion: v1
kind: ReplicationController
metadata:
  name: "bob"
  # namespace not given
spec:
  replicas: 2
  selector:
    app: nginx
  template:
    metadata:
      name: nginx
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: nginx
        ports:
        - containerPort: 80

@@ -0,0 +1,43 @@
# Two objects with same name in same namespace
apiVersion: v1
kind: ReplicationController
metadata:
  name: "bob"
  namespace: x
spec:
  replicas: 2
  selector:
    app: nginx
  template:
    metadata:
      name: nginx
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: nginx
        ports:
        - containerPort: 80
---
apiVersion: v1
kind: ReplicationController
metadata:
  name: "bob"
  namespace: x
spec:
  replicas: 2
  selector:
    app: nginx
  template:
    metadata:
      name: nginx
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: nginx
        ports:
        - containerPort: 80

fixtures/duplicates.yaml Executable file
@@ -0,0 +1,41 @@
# Two objects with same name in same namespace
apiVersion: v1
kind: ReplicationController
metadata:
  name: "bob"
spec:
  replicas: 2
  selector:
    app: nginx
  template:
    metadata:
      name: nginx
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: nginx
        ports:
        - containerPort: 80
---
apiVersion: v1
kind: ReplicationController
metadata:
  name: "bob"
spec:
  replicas: 2
  selector:
    app: nginx
  template:
    metadata:
      name: nginx
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: nginx
        ports:
        - containerPort: 80

fixtures/extra_property.yaml Executable file
@@ -0,0 +1,12 @@
---
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
  name: nginx-ds
spec:
  replicas: 2
  template:
    spec:
      containers:
      - image: nginx
        name: nginx

fixtures/full_domain_group.yaml Executable file
@@ -0,0 +1,13 @@
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: tiller-binding
  namespace: dev2
subjects:
- kind: ServiceAccount
  name: tiller
  namespace: dev2
roleRef:
  kind: Role
  name: tiller-manager
  apiGroup: rbac.authorization.k8s.io

fixtures/int_or_string.yaml Executable file
@@ -0,0 +1,17 @@
apiVersion: v1
kind: Service
metadata:
  labels:
    task: monitoring
    # For use as a Cluster add-on (https://github.com/kubernetes/kubernetes/tree/master/cluster/addons)
    # If you are NOT using this as an addon, you should comment out this line.
    kubernetes.io/cluster-service: 'true'
    kubernetes.io/name: Heapster
  name: heapster
  namespace: kube-system
spec:
  ports:
  - port: 80
    targetPort: 8082
  selector:
    k8s-app: heapster

fixtures/invalid.yaml Executable file
@@ -0,0 +1,19 @@
apiVersion: v1
kind: ReplicationController
metadata:
  name: "bob"
spec:
  replicas: asd"
  selector:
    app: nginx
  templates:
    metadata:
      name: nginx
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: nginx
        ports:
        - containerPort: 80

fixtures/list_invalid.yaml Executable file
@@ -0,0 +1,39 @@
apiVersion: v1
kind: List
items:
- apiVersion: v1
  kind: Service
  metadata:
    name: redis-master
    labels:
      app: redis
      tier: backend
      role: master
  spec:
    ports:
    # the port that this service should serve on
    - port: 6379
      targetPort: 6379
    selector:
      app: redis
      tier: backend
      role: master
- apiVersion: v1
  kind: ReplicationController
  metadata:
    name: "bob"
  spec:
    replicas: asd"
    selector:
      app: nginx
    templates:
      metadata:
        name: nginx
        labels:
          app: nginx
      spec:
        containers:
        - name: nginx
          image: nginx
          ports:
          - containerPort: 80

fixtures/list_valid.yaml Executable file
@@ -0,0 +1,174 @@
apiVersion: v1
kind: List
items:
- apiVersion: v1
  kind: Service
  metadata:
    name: redis-master
    labels:
      app: redis
      tier: backend
      role: master
  spec:
    ports:
    # the port that this service should serve on
    - port: 6379
      targetPort: 6379
    selector:
      app: redis
      tier: backend
      role: master
- apiVersion: v1
  kind: ReplicationController
  metadata:
    name: redis-master
    # these labels can be applied automatically
    # from the labels in the pod template if not set
    labels:
      app: redis
      role: master
      tier: backend
  spec:
    # this replicas value is default
    # modify it according to your case
    replicas: 1
    # selector can be applied automatically
    # from the labels in the pod template if not set
    # selector:
    #   app: guestbook
    #   role: master
    #   tier: backend
    template:
      metadata:
        labels:
          app: redis
          role: master
          tier: backend
      spec:
        containers:
        - name: master
          image: redis
          resources:
            requests:
              cpu: 100m
              memory: 100Mi
          ports:
          - containerPort: 6379
- apiVersion: v1
  kind: Service
  metadata:
    name: redis-slave
    labels:
      app: redis
      tier: backend
      role: slave
  spec:
    ports:
    # the port that this service should serve on
    - port: 6379
    selector:
      app: redis
      tier: backend
      role: slave
- apiVersion: v1
  kind: ReplicationController
  metadata:
    name: redis-slave
    # these labels can be applied automatically
    # from the labels in the pod template if not set
    labels:
      app: redis
      role: slave
      tier: backend
  spec:
    # this replicas value is default
    # modify it according to your case
    replicas: 2
    # selector can be applied automatically
    # from the labels in the pod template if not set
    # selector:
    #   app: guestbook
    #   role: slave
    #   tier: backend
    template:
      metadata:
        labels:
          app: redis
          role: slave
          tier: backend
      spec:
        containers:
        - name: slave
          image: gcr.io/google_samples/gb-redisslave:v1
          resources:
            requests:
              cpu: 100m
              memory: 100Mi
          env:
          - name: GET_HOSTS_FROM
            value: dns
            # If your cluster config does not include a dns service, then to
            # instead access an environment variable to find the master
            # service's host, comment out the 'value: dns' line above, and
            # uncomment the line below.
            # value: env
          ports:
          - containerPort: 6379
- apiVersion: v1
  kind: Service
  metadata:
    name: frontend
    labels:
      app: guestbook
      tier: frontend
  spec:
    # if your cluster supports it, uncomment the following to automatically create
    # an external load-balanced IP for the frontend service.
    # type: LoadBalancer
    ports:
    # the port that this service should serve on
    - port: 80
    selector:
      app: guestbook
      tier: frontend
- apiVersion: v1
  kind: ReplicationController
  metadata:
    name: frontend
    # these labels can be applied automatically
    # from the labels in the pod template if not set
    labels:
      app: guestbook
      tier: frontend
  spec:
    # this replicas value is default
    # modify it according to your case
    replicas: 3
    # selector can be applied automatically
    # from the labels in the pod template if not set
    # selector:
    #   app: guestbook
    #   tier: frontend
    template:
      metadata:
        labels:
          app: guestbook
          tier: frontend
      spec:
        containers:
        - name: php-redis
          image: gcr.io/google_samples/gb-frontend:v3
          resources:
            requests:
              cpu: 100m
              memory: 100Mi
          env:
          - name: GET_HOSTS_FROM
            value: dns
            # If your cluster config does not include a dns service, then to
            # instead access environment variables to find service host
            # info, comment out the 'value: dns' line above, and uncomment the
            # line below.
            # value: env
          ports:
          - containerPort: 80

fixtures/missing_kind.yaml Executable file
@@ -0,0 +1 @@
key: value

@@ -0,0 +1 @@
kind:

fixtures/multi_invalid.yaml Executable file
@@ -0,0 +1,176 @@
apiVersion: v1
kind: Service
metadata:
  name: redis-master
  labels:
    app: redis
    tier: backend
    role: master
spec:
  ports:
  # the port that this service should serve on
  - port: sds
    targetPort: 6379
  selector:
    app: redis
    tier: backend
    role: master
---
apiVersion: v1
kind: ReplicationController
metadata:
  name: redis-master
  # these labels can be applied automatically
  # from the labels in the pod template if not set
  labels:
    app: redis
    role: master
    tier: backend
spec:
  # this replicas value is default
  # modify it according to your case
  replicas: 1
  # selector can be applied automatically
  # from the labels in the pod template if not set
  # selector:
  #   app: guestbook
  #   role: master
  #   tier: backend
  template:
    metadata:
      labels:
        app: redis
        role: master
        tier: backend
    spec:
      containers:
      - name: master
        image: redis
        resources:
          requests:
            cpu: 100m
            memory: 100Mi
        ports:
        - containerPort: 6379
---
apiVersion: v1
kind: Service
metadata:
  name: redis-slave
  labels:
    app: redis
    tier: backend
    role: slave
spec:
  ports:
  # the port that this service should serve on
  - port: 6379
  selector:
    app: redis
    tier: backend
    role: slave
---
apiVersion: v1
kind: ReplicationController
metadata:
  name: redis-slave
  # these labels can be applied automatically
  # from the labels in the pod template if not set
  labels:
    app: redis
    role: slave
    tier: backend
spec:
  # this replicas value is default
  # modify it according to your case
  replicas: 2
  # selector can be applied automatically
  # from the labels in the pod template if not set
  # selector:
  #   app: guestbook
  #   role: slave
  #   tier: backend
  template:
    metadata:
      labels:
        app: redis
        role: slave
        tier: backend
    spec:
      containers:
      - name: slave
        image: gcr.io/google_samples/gb-redisslave:v1
        resources:
          requests:
            cpu: 100m
            memory: 100Mi
        env:
        - name: GET_HOSTS_FROM
          value: dns
          # If your cluster config does not include a dns service, then to
          # instead access an environment variable to find the master
          # service's host, comment out the 'value: dns' line above, and
          # uncomment the line below.
          # value: env
        ports:
        - containerPort: 6379
---
apiVersion: v1
kind: Service
metadata:
  name: frontend
  labels:
    app: guestbook
    tier: frontend
spec:
  # if your cluster supports it, uncomment the following to automatically create
  # an external load-balanced IP for the frontend service.
  # type: LoadBalancer
  ports:
  # the port that this service should serve on
  - port: 80
  selector:
    app: guestbook
    tier: frontend
---
apiVersion: v1
kind: ReplicationController
metadata:
  name: frontend
  # these labels can be applied automatically
  # from the labels in the pod template if not set
  labels:
    app: guestbook
    tier: frontend
spec:
  # this replicas value is default
  # modify it according to your case
  replicas: 3
  # selector can be applied automatically
  # from the labels in the pod template if not set
  # selector:
  #   app: guestbook
  #   tier: frontend
  template:
    metadata:
      labels:
        app: guestbook
        tier: frontend
    spec:
      containers:
      - name: php-redis
        image: gcr.io/google_samples/gb-frontend:v3
        resources:
          requests:
            cpu: 100m
            memory: 100Mi
        env:
        - name: GET_HOSTS_FROM
          value: dns
          # If your cluster config does not include a dns service, then to
          # instead access environment variables to find service host
          # info, comment out the 'value: dns' line above, and uncomment the
          # line below.
          # value: env
        ports:
        - containerPort: 80

@@ -0,0 +1,9 @@
kind:
---
kind:
---
kind:
---
kind:
---
kind:

fixtures/multi_valid.yaml Executable file
@@ -0,0 +1,181 @@
---
apiVersion: v1
kind: Service
metadata:
  name: redis-master
  labels:
    app: redis
    tier: backend
    role: master
spec:
  ports:
  # the port that this service should serve on
  - port: 6379
    targetPort: 6379
  selector:
    app: redis
    tier: backend
    role: master
---
apiVersion: v1
kind: ReplicationController
metadata:
  name: redis-master
  # these labels can be applied automatically
  # from the labels in the pod template if not set
  labels:
    app: redis
    role: master
    tier: backend
spec:
  # this replicas value is default
  # modify it according to your case
  replicas: 1
  # selector can be applied automatically
  # from the labels in the pod template if not set
  # selector:
  #   app: guestbook
  #   role: master
  #   tier: backend
  template:
    metadata:
      labels:
        app: redis
        role: master
        tier: backend
    spec:
      containers:
      - name: master
        image: redis
        resources:
          requests:
            cpu: 100m
            memory: 100Mi
        ports:
        - containerPort: 6379
---
apiVersion: v1
kind: Service
metadata:
  name: redis-slave
  labels:
    app: redis
    tier: backend
    role: slave
spec:
  ports:
  # the port that this service should serve on
  - port: 6379
  selector:
    app: redis
    tier: backend
    role: slave
---
apiVersion: v1
kind: ReplicationController
metadata:
  name: redis-slave
  # these labels can be applied automatically
  # from the labels in the pod template if not set
  labels:
    app: redis
    role: slave
    tier: backend
spec:
  # this replicas value is default
  # modify it according to your case
  replicas: 2
  # selector can be applied automatically
  # from the labels in the pod template if not set
  # selector:
  #   app: guestbook
  #   role: slave
  #   tier: backend
  template:
    metadata:
      labels:
        app: redis
        role: slave
        tier: backend
    spec:
      containers:
      - name: slave
        image: gcr.io/google_samples/gb-redisslave:v1
        resources:
          requests:
            cpu: 100m
            memory: 100Mi
        env:
        - name: GET_HOSTS_FROM
          value: dns
          # If your cluster config does not include a dns service, then to
          # instead access an environment variable to find the master
          # service's host, comment out the 'value: dns' line above, and
          # uncomment the line below.
          # value: env
        ports:
        - containerPort: 6379
---
apiVersion: v1
kind: Service
metadata:
  name: frontend
  labels:
    app: guestbook
    tier: frontend
spec:
  # if your cluster supports it, uncomment the following to automatically create
  # an external load-balanced IP for the frontend service.
  # type: LoadBalancer
  ports:
  # the port that this service should serve on
  - port: 80
  selector:
    app: guestbook
    tier: frontend
---
apiVersion: v1
kind: ReplicationController
metadata:
  name: frontend
  # these labels can be applied automatically
  # from the labels in the pod template if not set
  labels:
    app: guestbook
    tier: frontend
spec:
  # this replicas value is default
  # modify it according to your case
  replicas: 3
  # selector can be applied automatically
  # from the labels in the pod template if not set
  # selector:
  #   app: guestbook
  #   tier: frontend
  template:
    metadata:
      labels:
        app: guestbook
        tier: frontend
    spec:
      containers:
      - name: php-redis
        image: gcr.io/google_samples/gb-frontend:v3
        resources:
          requests:
            cpu: 100m
            memory: 100Mi
        env:
        - name: GET_HOSTS_FROM
          value: dns
          # If your cluster config does not include a dns service, then to
          # instead access environment variables to find service host
          # info, comment out the 'value: dns' line above, and uncomment the
          # line below.
          # value: env
        ports:
        - containerPort: 80
---
---
# an empty resource with comments
---

fixtures/multi_valid_source.yaml Executable file
@@ -0,0 +1,184 @@
---
# Source: chart/templates/primary.yaml
apiVersion: v1
kind: Service
metadata:
  name: redis-primary
  labels:
    app: redis
    tier: backend
    role: primary
spec:
  ports:
  # the port that this service should serve on
  - port: 6379
    targetPort: 6379
  selector:
    app: redis
    tier: backend
    role: primary
---
apiVersion: v1
kind: ReplicationController
metadata:
  name: redis-primary
  # these labels can be applied automatically
  # from the labels in the pod template if not set
  labels:
    app: redis
    role: primary
    tier: backend
spec:
  # this replicas value is default
  # modify it according to your case
  replicas: 1
  # selector can be applied automatically
  # from the labels in the pod template if not set
  # selector:
  #   app: guestbook
  #   role: primary
  #   tier: backend
  template:
    metadata:
      labels:
        app: redis
        role: primary
        tier: backend
    spec:
      containers:
      - name: primary
        image: redis
        resources:
          requests:
            cpu: 100m
            memory: 100Mi
        ports:
        - containerPort: 6379
---
# Source: chart/templates/secondary.yaml
apiVersion: v1
kind: Service
metadata:
  name: redis-secondary
  labels:
    app: redis
    tier: backend
    role: secondary
spec:
  ports:
  # the port that this service should serve on
  - port: 6379
  selector:
    app: redis
    tier: backend
    role: secondary
---
apiVersion: v1
kind: ReplicationController
metadata:
  name: redis-secondary
  # these labels can be applied automatically
  # from the labels in the pod template if not set
  labels:
    app: redis
    role: secondary
    tier: backend
spec:
  # this replicas value is default
  # modify it according to your case
  replicas: 2
  # selector can be applied automatically
  # from the labels in the pod template if not set
  # selector:
  #   app: guestbook
  #   role: secondary
  #   tier: backend
  template:
    metadata:
      labels:
        app: redis
        role: secondary
        tier: backend
    spec:
      containers:
      - name: secondary
        image: gcr.io/google_samples/gb-redissecondary:v1
        resources:
          requests:
            cpu: 100m
            memory: 100Mi
        env:
        - name: GET_HOSTS_FROM
          value: dns
          # If your cluster config does not include a dns service, then to
          # instead access an environment variable to find the primary
          # service's host, comment out the 'value: dns' line above, and
          # uncomment the line below.
          # value: env
        ports:
        - containerPort: 6379
---
# Source: chart/templates/frontend.yaml
apiVersion: v1
kind: Service
metadata:
  name: frontend
  labels:
    app: guestbook
    tier: frontend
spec:
  # if your cluster supports it, uncomment the following to automatically create
  # an external load-balanced IP for the frontend service.
  # type: LoadBalancer
  ports:
  # the port that this service should serve on
  - port: 80
  selector:
    app: guestbook
    tier: frontend
---
apiVersion: v1
kind: ReplicationController
metadata:
  name: frontend
  # these labels can be applied automatically
  # from the labels in the pod template if not set
  labels:
    app: guestbook
    tier: frontend
spec:
  # this replicas value is default
  # modify it according to your case
  replicas: 3
  # selector can be applied automatically
  # from the labels in the pod template if not set
  # selector:
  #   app: guestbook
  #   tier: frontend
  template:
    metadata:
      labels:
        app: guestbook
        tier: frontend
    spec:
      containers:
      - name: php-redis
        image: gcr.io/google_samples/gb-frontend:v3
        resources:
          requests:
            cpu: 100m
            memory: 100Mi
        env:
        - name: GET_HOSTS_FROM
          value: dns
          # If your cluster config does not include a dns service, then to
          # instead access environment variables to find service host
          # info, comment out the 'value: dns' line above, and uncomment the
          # line below.
          # value: env
        ports:
        - containerPort: 80
---
---
# an empty resource with comments
---

fixtures/null_array.yaml Executable file
@@ -0,0 +1,40 @@
kind: Deployment
apiVersion: extensions/v1beta1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kube-system
spec:
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      k8s-app: kubernetes-dashboard
  template:
    metadata:
      labels:
        k8s-app: kubernetes-dashboard
    spec:
      containers:
      - name: kubernetes-dashboard
        image: gcr.io/google_containers/kubernetes-dashboard-amd64:v1.6.1
        ports:
        - containerPort: 9090
          protocol: TCP
        args:
          # Uncomment the following line to manually specify Kubernetes API server Host
          # If not specified, Dashboard will attempt to auto discover the API server and connect
          # to it. Uncomment only if the default does not work.
          # - --apiserver-host=http://my-address:port
        livenessProbe:
          httpGet:
            path: /
            port: 9090
          initialDelaySeconds: 30
          timeoutSeconds: 30
      serviceAccountName: kubernetes-dashboard
      # Comment the following tolerations if Dashboard must not be deployed on master
      tolerations:
      - key: node-role.kubernetes.io/master
        effect: NoSchedule

fixtures/null_string.yaml Executable file
@@ -0,0 +1,19 @@
apiVersion: v1
kind: Service
metadata:
  annotations:
    kompose.service.type: LoadBalancer
  creationTimestamp: null
  labels:
    io.kompose.service: frontend
  name: frontend
spec:
  ports:
  - name: "80"
    port: 80
    targetPort: 80
  selector:
    io.kompose.service: frontend
  type: LoadBalancer
status:
  loadBalancer: {}

fixtures/quantity.yaml Executable file
@@ -0,0 +1,11 @@
apiVersion: v1
kind: LimitRange
metadata:
  name: mem-limit-range
spec:
  limits:
  - default:
      memory: 512Mi
    defaultRequest:
      memory: 256000
    type: Container

@@ -0,0 +1,49 @@
# Two objects with same name in same namespace, and having the same kind, but
# of different API (apps/v1 vs. apps/v1beta1). This is important when CRDs
# introduce overlapping `metadata:name` values, e.g. `Deployment` in
# `my-awesome-cd-tool.io/v1` (contrived scenario).
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment
  labels:
    app: nginx
spec:
  replicas: 3
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: nginx:1.7.9
        ports:
        - containerPort: 80
---
apiVersion: apps/v1beta1
kind: Deployment
metadata:
  name: nginx-deployment
  labels:
    app: nginx
spec:
  replicas: 3
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: nginx:1.7.9
        ports:
        - containerPort: 80

@@ -0,0 +1,43 @@
# Two objects with same name in different namespace, one of them being the configured default namespace
apiVersion: v1
kind: ReplicationController
metadata:
  name: "bob"
  namespace: a
spec:
  replicas: 2
  selector:
    app: nginx
  template:
    metadata:
      name: nginx
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: nginx
        ports:
        - containerPort: 80
---
apiVersion: v1
kind: ReplicationController
metadata:
  name: "bob"
  namespace: the-default-namespace
spec:
  replicas: 2
  selector:
    app: nginx
  template:
    metadata:
      name: nginx
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: nginx
        ports:
        - containerPort: 80

@@ -0,0 +1,43 @@
# Two objects with same name in different namespace
apiVersion: v1
kind: ReplicationController
metadata:
  name: "bob"
  namespace: a
spec:
  replicas: 2
  selector:
    app: nginx
  template:
    metadata:
      name: nginx
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: nginx
        ports:
        - containerPort: 80
---
apiVersion: v1
kind: ReplicationController
metadata:
  name: "bob"
  namespace: b
spec:
  replicas: 2
  selector:
    app: nginx
  template:
    metadata:
      name: nginx
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: nginx
        ports:
        - containerPort: 80

fixtures/test_crd.yaml Executable file
@@ -0,0 +1,18 @@
---
apiVersion: bitnami.com/v1alpha1
kind: SealedSecret
metadata:
  name: test-secret
  namespace: test-namespace
spec:
  encryptedData:
    SOME_ENCRYPTED_DATA: c3ab8ff13720e8ad9047dd39466b3c8974e592c2fa383d4a3960714caef0c4f2
---
apiVersion: bitnami.com/v1alpha1
kind: SealedSecret
metadata:
  name: test-secret-clone
  namespace: test-namespace
spec:
  encryptedData:
    SOME_ENCRYPTED_DATA: c3ab8ff13720e8ad9047dd39466b3c8974e592c2fa383d4a3960714caef0c4f2

@@ -0,0 +1,8 @@
apiVersion: v1
kind: ConfigMap
metadata:
  name: unconventional-keys
data:
  5: "integer"
  3.14: "float"
  true: "boolean"

fixtures/valid.json Executable file
@@ -0,0 +1,46 @@
{
  "apiVersion": "apps/v1beta1",
  "kind": "Deployment",
  "metadata": {
    "name": "nginx-deployment",
    "namespace": "default"
  },
  "spec": {
    "replicas": 2,
    "template": {
      "spec": {
        "affinity": { },
        "containers": [
          {
            "args": [ ],
            "command": [ ],
            "env": [ ],
            "envFrom": [ ],
            "image": "nginx:1.7.9",
            "lifecycle": { },
            "livenessProbe": { },
            "name": "nginx",
            "ports": [
              {
                "containerPort": 80,
                "name": "http"
              }
            ],
            "readinessProbe": { },
            "resources": { },
            "securityContext": { },
            "volumeMounts": [ ]
          }
        ],
        "hostMappings": [ ],
        "imagePullSecrets": [ ],
        "initContainers": [ ],
        "nodeSelector": { },
        "securityContext": { },
        "tolerations": [ ],
        "volumes": [ ]
      }
    }
  },
  "status": { }
}

fixtures/valid_version.yaml Executable file
@@ -0,0 +1,12 @@
apiVersion: autoscaling/v1
kind: HorizontalPodAutoscaler
metadata:
  name: my-app-hpa
spec:
  minReplicas: 100
  maxReplicas: 300
  scaleTargetRef:
    apiVersion: extensions/v1beta1
    kind: Deployment
    name: my-app
  targetCPUUtilizationPercentage: 15

main.go

@@ -156,7 +156,7 @@ func skipKindsMap(skipKindsCSV string) map[string]bool {
    return skipKinds
}

-func processResults(o output.Output, validationResults chan []validationResult, result chan<- bool ) {
+func processResults(o output.Output, validationResults chan []validationResult, result chan<- bool) {
    success := true
    for results := range validationResults {
        for _, result := range results {
@@ -224,14 +224,13 @@ func realMain() int {
        close(fileBatches)
    }()

    var o output.Output
    if o, err = getLogger(outputFormat, summary, verbose); err != nil {
        fmt.Println(err)
        return 1
    }
-   res := make (chan bool)
+   res := make(chan bool)
    validationResults := make(chan []validationResult)
    go processResults(o, validationResults, res)
@@ -246,7 +245,12 @@ func realMain() int {
    for _, filename := range fileBatch {
        f, err := os.Open(filename)
        if err != nil {
-           log.Printf("failed opening %s\n", filename)
+           fmt.Printf("failed opening %s\n", filename)
+           validationResults <- []validationResult{{
+               filename: filename,
+               err:      err,
+               skipped:  true,
+           }}
            continue
        }
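
Taken together with the new acceptance test above ("Return relevant error for non-existent file"), this main.go change makes an unreadable file surface both as a printed error and as a skipped validationResult, while the run still exits non-zero. A sketch of the expected behaviour, inferred from the fmt.Printf call and the test's assertions (exact output beyond the first line may differ):

    $ bin/kubeconform -file fixtures/not-here
    failed opening fixtures/not-here
    $ echo $?
    1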