Compare commits

..

No commits in common. "master" and "v0.6.1" have entirely different histories.

207 changed files with 5559 additions and 52208 deletions

View file

@@ -5,7 +5,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: checkout
uses: actions/checkout@v4
uses: actions/checkout@v2
- name: test
run: make docker-test
@@ -20,7 +20,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: checkout
uses: actions/checkout@v4
uses: actions/checkout@v2
- name: test
working-directory: ./scripts
@@ -34,13 +34,10 @@ jobs:
if: startsWith(github.ref, 'refs/tags/v')
steps:
- name: checkout
uses: actions/checkout@v4
uses: actions/checkout@v2
with:
fetch-depth: 0 # https://github.com/goreleaser/goreleaser-action/issues/56
- uses: docker/setup-qemu-action@v3
- uses: docker/setup-buildx-action@v3
- name: goreleaser
run: |
echo "${{ github.token }}" | docker login https://ghcr.io -u ${GITHUB_ACTOR} --password-stdin

View file

@@ -32,63 +32,23 @@ archives:
dockers:
- image_templates:
- 'ghcr.io/{{.Env.GIT_OWNER}}/kubeconform:latest'
- 'ghcr.io/{{.Env.GIT_OWNER}}/kubeconform:{{ .Tag }}'
- 'ghcr.io/{{.Env.GIT_OWNER}}/kubeconform:{{ .Tag }}-amd64'
- 'ghcr.io/{{.Env.GIT_OWNER}}/kubeconform:latest-amd64'
dockerfile: Dockerfile
use: buildx
build_flag_templates:
- "--pull"
- "--platform=linux/amd64"
goos: linux
goarch: amd64
- image_templates:
- 'ghcr.io/{{.Env.GIT_OWNER}}/kubeconform:{{ .Tag }}-arm64'
- 'ghcr.io/{{.Env.GIT_OWNER}}/kubeconform:latest-arm64'
dockerfile: Dockerfile
use: buildx
build_flag_templates:
- "--pull"
- "--platform=linux/arm64"
goos: linux
goarch: arm64
- image_templates:
- 'ghcr.io/{{.Env.GIT_OWNER}}/kubeconform:latest-alpine'
- 'ghcr.io/{{.Env.GIT_OWNER}}/kubeconform:{{ .Tag }}-alpine'
- 'ghcr.io/{{.Env.GIT_OWNER}}/kubeconform:{{ .Tag }}-amd64-alpine'
- 'ghcr.io/{{.Env.GIT_OWNER}}/kubeconform:latest-amd64-alpine'
dockerfile: Dockerfile-alpine
use: buildx
build_flag_templates:
- "--pull"
- "--platform=linux/amd64"
goos: linux
goarch: amd64
- image_templates:
- 'ghcr.io/{{.Env.GIT_OWNER}}/kubeconform:{{ .Tag }}-arm64-alpine'
- 'ghcr.io/{{.Env.GIT_OWNER}}/kubeconform:latest-arm64-alpine'
dockerfile: Dockerfile-alpine
use: buildx
build_flag_templates:
- "--pull"
- "--platform=linux/arm64"
goos: linux
goarch: arm64
docker_manifests:
- name_template: 'ghcr.io/{{.Env.GIT_OWNER}}/kubeconform:{{ .Tag }}'
image_templates:
- 'ghcr.io/{{.Env.GIT_OWNER}}/kubeconform:{{ .Tag }}-amd64'
- 'ghcr.io/{{.Env.GIT_OWNER}}/kubeconform:{{ .Tag }}-arm64'
- name_template: 'ghcr.io/{{.Env.GIT_OWNER}}/kubeconform:latest'
image_templates:
- 'ghcr.io/{{.Env.GIT_OWNER}}/kubeconform:latest-amd64'
- 'ghcr.io/{{.Env.GIT_OWNER}}/kubeconform:latest-arm64'
- name_template: 'ghcr.io/{{.Env.GIT_OWNER}}/kubeconform:latest-alpine'
image_templates:
- 'ghcr.io/{{.Env.GIT_OWNER}}/kubeconform:latest-amd64-alpine'
- 'ghcr.io/{{.Env.GIT_OWNER}}/kubeconform:latest-arm64-alpine'
- name_template: 'ghcr.io/{{.Env.GIT_OWNER}}/kubeconform:{{ .Tag }}-alpine'
image_templates:
- 'ghcr.io/{{.Env.GIT_OWNER}}/kubeconform:{{ .Tag }}-amd64-alpine'
- 'ghcr.io/{{.Env.GIT_OWNER}}/kubeconform:{{ .Tag }}-arm64-alpine'
checksum:
name_template: 'CHECKSUMS'

View file

@@ -1,14 +1,15 @@
FROM alpine:3.21.3 as certs
FROM alpine:3.14 as certs
RUN apk add ca-certificates
FROM scratch AS kubeconform
LABEL org.opencontainers.image.authors="Yann Hamon <yann@mandragor.org>" \
LABEL org.opencontainers.image.authors="yann@mandragor.org" \
org.opencontainers.image.source="https://github.com/yannh/kubeconform/" \
org.opencontainers.image.description="A Kubernetes manifests validation tool" \
org.opencontainers.image.documentation="https://github.com/yannh/kubeconform/" \
org.opencontainers.image.licenses="Apache License 2.0" \
org.opencontainers.image.title="kubeconform" \
org.opencontainers.image.url="https://github.com/yannh/kubeconform/"
MAINTAINER Yann HAMON <yann@mandragor.org>
COPY --from=certs /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ca-certificates.crt
COPY kubeconform /
ENTRYPOINT ["/kubeconform"]

View file

@@ -1,12 +1,12 @@
FROM alpine:3.20.2
LABEL org.opencontainers.image.authors="Yann Hamon <yann@mandragor.org>" \
FROM alpine:3.14 as certs
LABEL org.opencontainers.image.authors="yann@mandragor.org" \
org.opencontainers.image.source="https://github.com/yannh/kubeconform/" \
org.opencontainers.image.description="A Kubernetes manifests validation tool" \
org.opencontainers.image.documentation="https://github.com/yannh/kubeconform/" \
org.opencontainers.image.licenses="Apache License 2.0" \
org.opencontainers.image.title="kubeconform" \
org.opencontainers.image.url="https://github.com/yannh/kubeconform/"
MAINTAINER Yann HAMON <yann@mandragor.org>
RUN apk add ca-certificates
COPY kubeconform /
ENTRYPOINT ["/kubeconform"]

View file

@@ -1,5 +1,5 @@
FROM bats/bats:1.11.0
FROM bats/bats:v1.2.1
RUN apk --no-cache add ca-certificates parallel libxml2-utils
COPY bin/kubeconform /code/bin/
COPY dist/kubeconform_linux_amd64_v1/kubeconform /code/bin/
COPY acceptance.bats acceptance-nonetwork.bats /code/
COPY fixtures /code/fixtures

View file

@@ -5,10 +5,9 @@ RELEASE_VERSION ?= latest
.PHONY: local-test local-build local-build-static docker-test docker-build docker-build-static build-bats docker-acceptance release update-deps build-single-target
local-test:
go test -race ./... -count=1
go test -race ./...
local-build:
git config --global --add safe.directory $$PWD
go build -o bin/ ./...
local-build-static:
@@ -16,13 +15,13 @@ local-build-static:
# These only used for development. Release artifacts and docker images are produced by goreleaser.
docker-test:
docker run -t -v $$PWD:/go/src/github.com/yannh/kubeconform -w /go/src/github.com/yannh/kubeconform golang:1.24.3 make local-test
docker run -t -v $$PWD:/go/src/github.com/yannh/kubeconform -w /go/src/github.com/yannh/kubeconform golang:1.17 make local-test
docker-build:
docker run -t -v $$PWD:/go/src/github.com/yannh/kubeconform -w /go/src/github.com/yannh/kubeconform golang:1.24.3 make local-build
docker run -t -v $$PWD:/go/src/github.com/yannh/kubeconform -w /go/src/github.com/yannh/kubeconform golang:1.17 make local-build
docker-build-static:
docker run -t -v $$PWD:/go/src/github.com/yannh/kubeconform -w /go/src/github.com/yannh/kubeconform golang:1.24.3 make local-build-static
docker run -t -v $$PWD:/go/src/github.com/yannh/kubeconform -w /go/src/github.com/yannh/kubeconform golang:1.17 make local-build-static
build-bats:
docker build -t bats -f Dockerfile.bats .
@@ -32,11 +31,11 @@ docker-acceptance: build-bats
docker run --network none -t bats -p acceptance-nonetwork.bats
goreleaser-build-static:
docker run -t -e GOOS=linux -e GOARCH=amd64 -v $$PWD:/go/src/github.com/yannh/kubeconform -w /go/src/github.com/yannh/kubeconform goreleaser/goreleaser:v2.9.0 build --clean --single-target --snapshot
docker run -t -e GOOS=linux -e GOARCH=amd64 -v $$PWD:/go/src/github.com/yannh/kubeconform -w /go/src/github.com/yannh/kubeconform goreleaser/goreleaser:v1.11.5 build --single-target --skip-post-hooks --rm-dist --snapshot
cp dist/kubeconform_linux_amd64_v1/kubeconform bin/
release:
docker run -e GITHUB_TOKEN -e GIT_OWNER -t -v /var/run/docker.sock:/var/run/docker.sock -v $$PWD:/go/src/github.com/yannh/kubeconform -w /go/src/github.com/yannh/kubeconform goreleaser/goreleaser:v2.9.0 release --clean
docker run -e GITHUB_TOKEN -e GIT_OWNER -t -v /var/run/docker.sock:/var/run/docker.sock -v $$PWD:/go/src/github.com/yannh/kubeconform -w /go/src/github.com/yannh/kubeconform goreleaser/goreleaser:v1.11.5 release --rm-dist
update-deps:
go get -u ./...

View file

@@ -1,7 +1,7 @@
<img width="50%" alt="Kubeconform-GitHub-Hero" src="https://user-images.githubusercontent.com/19731161/142411871-f695e40c-bfa8-43ca-97c0-94c256749732.png">
<hr>
[![Build status](https://github.com/yannh/kubeconform/actions/workflows/main.yml/badge.svg)](https://github.com/yannh/kubeconform/actions?query=branch%3Amaster)
[![Build status](https://github.com/yannh/kubeconform/workflows/build/badge.svg?branch=master)](https://github.com/yannh/kubeconform/actions?query=branch%3Amaster)
[![Homebrew](https://img.shields.io/badge/dynamic/json.svg?url=https://formulae.brew.sh/api/formula/kubeconform.json&query=$.versions.stable&label=homebrew)](https://formulae.brew.sh/formula/kubeconform)
[![Go Report card](https://goreportcard.com/badge/github.com/yannh/kubeconform)](https://goreportcard.com/report/github.com/yannh/kubeconform)
[![PkgGoDev](https://pkg.go.dev/badge/github.com/yannh/kubeconform/pkg/validator)](https://pkg.go.dev/github.com/yannh/kubeconform/pkg/validator)
@@ -44,7 +44,7 @@ sys 0m1,069s
* [Usage examples](#Usage-examples)
* [Proxy support](#Proxy-support)
* [Overriding schemas location](#Overriding-schemas-location)
* [CustomResourceDefinition (CRD) Support](#CustomResourceDefinition-CRD-Support)
* [CustomResourceDefinition (CRD) Support](#CustomResourceDefinition-(CRD)-Support)
* [OpenShift schema Support](#OpenShift-schema-Support)
* [Integrating Kubeconform in the CI](#Integrating-Kubeconform-in-the-CI)
* [Github Workflow](#Github-Workflow)
@@ -81,12 +81,6 @@ If you are a [Homebrew](https://brew.sh/) user, you can install by running:
$ brew install kubeconform
```
If you are a Windows user, you can install with [winget](https://learn.microsoft.com/en-us/windows/package-manager/winget/) by running:
```cmd
winget install YannHamon.kubeconform
```
You can also download the latest version from the [release page](https://github.com/yannh/kubeconform/releases).
Another way of installation is via Golang's package manager:
@@ -103,39 +97,39 @@ $ go install github.com/yannh/kubeconform/cmd/kubeconform@latest
```
$ kubeconform -h
Usage: kubeconform [OPTION]... [FILE OR FOLDER]...
Usage: ./bin/kubeconform [OPTION]... [FILE OR FOLDER]...
-cache string
cache schemas downloaded via HTTP to this folder
cache schemas downloaded via HTTP to this folder
-debug
print debug information
print debug information
-exit-on-error
immediately stop execution when the first error is encountered
-h show help information
immediately stop execution when the first error is encountered
-h show help information
-ignore-filename-pattern value
regular expression specifying paths to ignore (can be specified multiple times)
regular expression specifying paths to ignore (can be specified multiple times)
-ignore-missing-schemas
skip files with missing schemas instead of failing
skip files with missing schemas instead of failing
-insecure-skip-tls-verify
disable verification of the server's SSL certificate. This will make your HTTPS connections insecure
disable verification of the server\'s SSL certificate. This will make your HTTPS connections insecure
-kubernetes-version string
version of Kubernetes to validate against, e.g.: 1.18.0 (default "master")
version of Kubernetes to validate against, e.g.: 1.18.0 (default "master")
-n int
number of goroutines to run concurrently (default 4)
number of goroutines to run concurrently (default 4)
-output string
output format - json, junit, pretty, tap, text (default "text")
output format - json, junit, tap, text (default "text")
-reject string
comma-separated list of kinds or GVKs to reject
comma-separated list of kinds or GVKs to reject
-schema-location value
override schemas location search path (can be specified multiple times)
override schemas location search path (can be specified multiple times)
-skip string
comma-separated list of kinds or GVKs to ignore
comma-separated list of kinds or GVKs to ignore
-strict
disallow additional properties not in schema or duplicated keys
disallow additional properties not in schema or duplicated keys
-summary
print a summary at the end (ignored for junit output)
-v show version information
print a summary at the end (ignored for junit output)
-v show version information
-verbose
print results for all resources (ignored for tap and junit output)
print results for all resources (ignored for tap and junit output)
```
### Usage examples
@@ -256,16 +250,12 @@ $ python ./scripts/openapi2jsonschema.py https://raw.githubusercontent.com/aws/a
JSON schema written to trainingjob_v1.json
```
By default, the file name output format is `{kind}_{version}`. The `FILENAME_FORMAT` environment variable can be used to change the output file name (Available variables: `kind`, `group`, `fullgroup`, `version`):
By default, the file name output format is `{kind}_{version}`. The `FILENAME_FORMAT` environment variable can be used to change the output file name (Available variables: `kind`, `group`, `version`):
```
$ export FILENAME_FORMAT='{kind}-{group}-{version}'
$ ./scripts/openapi2jsonschema.py https://raw.githubusercontent.com/aws/amazon-sagemaker-operator-for-k8s/master/config/crd/bases/sagemaker.aws.amazon.com_trainingjobs.yaml
JSON schema written to trainingjob-sagemaker-v1.json
$ export FILENAME_FORMAT='{kind}-{fullgroup}-{version}'
$ ./scripts/openapi2jsonschema.py https://raw.githubusercontent.com/aws/amazon-sagemaker-operator-for-k8s/master/config/crd/bases/sagemaker.aws.amazon.com_trainingjobs.yaml
JSON schema written to trainingjob-sagemaker.aws.amazon.com-v1.json
```
After converting your CRDs to JSON schema files, you can use `kubeconform` to validate your CRs against them:
@@ -308,7 +298,7 @@ jobs:
- name: login to Github Packages
run: echo "${{ github.token }}" | docker login https://ghcr.io -u ${GITHUB_ACTOR} --password-stdin
- uses: actions/checkout@v2
- uses: docker://ghcr.io/yannh/kubeconform:latest
- uses: docker://ghcr.io/yannh/kubeconform:master
with:
entrypoint: '/kubeconform'
args: "-summary -output json kubeconfigs/"
@@ -331,7 +321,7 @@ lint-kubeconform:
name: ghcr.io/yannh/kubeconform:latest-alpine
entrypoint: [""]
script:
- /kubeconform -summary -output json kubeconfigs/
- kubeconform
```
See [issue 106](https://github.com/yannh/kubeconform/issues/106) for more details.

View file

@@ -19,8 +19,3 @@
run bin/kubeconform -schema-location 'fixtures/{{ .ResourceKind }}.json' -schema-location './fixtures/registry/{{ .ResourceKind }}{{ .KindSuffix }}.json' fixtures/test_crd.yaml
[ "$status" -eq 0 ]
}
@test "Pass when using a cached schema with external references" {
run bin/kubeconform -cache fixtures/cache -summary -schema-location 'https://raw.githubusercontent.com/yannh/kubernetes-json-schema/master/{{ .NormalizedKubernetesVersion }}{{ .StrictSuffix }}/{{ .ResourceKind }}{{ .KindSuffix }}.json' fixtures/valid.yaml
[ "$status" -eq 0 ]
}

View file

@@ -36,7 +36,7 @@ resetCacheFolder() {
}
@test "Pass when parsing a valid Kubernetes config JSON file" {
run bin/kubeconform -kubernetes-version 1.20.0 -summary fixtures/valid.json
run bin/kubeconform -kubernetes-version 1.17.1 -summary fixtures/valid.json
[ "$status" -eq 0 ]
[ "$output" = "Summary: 1 resource found in 1 file - Valid: 1, Invalid: 0, Errors: 0, Skipped: 0" ]
}
@@ -134,17 +134,17 @@ resetCacheFolder() {
}
@test "Fail when parsing a config with additional properties and strict set" {
run bin/kubeconform -strict -kubernetes-version 1.20.0 fixtures/extra_property.yaml
run bin/kubeconform -strict -kubernetes-version 1.16.0 fixtures/extra_property.yaml
[ "$status" -eq 1 ]
}
@test "Fail when parsing a config with duplicate properties and strict set" {
run bin/kubeconform -strict -kubernetes-version 1.20.0 fixtures/duplicate_property.yaml
run bin/kubeconform -strict -kubernetes-version 1.16.0 fixtures/duplicate_property.yaml
[ "$status" -eq 1 ]
}
@test "Pass when parsing a config with duplicate properties and strict NOT set" {
run bin/kubeconform -kubernetes-version 1.20.0 fixtures/duplicate_property.yaml
run bin/kubeconform -kubernetes-version 1.16.0 fixtures/duplicate_property.yaml
[ "$status" -eq 0 ]
}
@@ -180,13 +180,6 @@ resetCacheFolder() {
[ "$status" -eq 1 ]
}
@test "Fail early when passing a non valid -kubernetes-version" {
run bin/kubeconform -kubernetes-version 1.25 fixtures/valid.yaml
[ "${lines[0]}" == 'invalid value "1.25" for flag -kubernetes-version: 1.25 is not a valid version. Valid values are "master" (default) or full version x.y.z (e.g. "1.27.2")' ]
[[ "${lines[1]}" == "Usage:"* ]]
[ "$status" -eq 1 ]
}
@test "Pass with a valid input when validating against openshift manifests" {
run bin/kubeconform -kubernetes-version 3.8.0 -schema-location 'https://raw.githubusercontent.com/garethr/openshift-json-schema/master/{{ .NormalizedKubernetesVersion }}-standalone{{ .StrictSuffix }}/{{ .ResourceKind }}.json' -summary fixtures/valid.yaml
[ "$status" -eq 0 ]
@@ -276,13 +269,6 @@ resetCacheFolder() {
[ "$output" = "failed opening cache folder cache_does_not_exist: stat cache_does_not_exist: no such file or directory" ]
}
@test "HTTP references should be cached" {
resetCacheFolder
run bin/kubeconform -cache cache -summary -schema-location 'https://raw.githubusercontent.com/yannh/kubernetes-json-schema/master/{{ .NormalizedKubernetesVersion }}{{ .StrictSuffix }}/{{ .ResourceKind }}{{ .KindSuffix }}.json' fixtures/valid.yaml
[ "$status" -eq 0 ]
[ "`ls cache/ | wc -l`" -eq 2 ]
}
@test "Produces correct TAP output" {
run bin/kubeconform -output tap fixtures/valid.yaml
[ "$status" -eq 0 ]
@@ -306,14 +292,14 @@ resetCacheFolder() {
@test "Fail when parsing a List that contains an invalid resource" {
run bin/kubeconform -summary fixtures/list_invalid.yaml
[ "$status" -eq 1 ]
[ "${lines[0]}" == 'fixtures/list_invalid.yaml - ReplicationController bob is invalid: problem validating schema. Check JSON formatting: jsonschema validation failed with '\''https://raw.githubusercontent.com/yannh/kubernetes-json-schema/master/master-standalone/replicationcontroller-v1.json#'\'' - at '\''/spec/replicas'\'': got string, want null or integer' ]
[ "${lines[0]}" == 'fixtures/list_invalid.yaml - ReplicationController bob is invalid: problem validating schema. Check JSON formatting: jsonschema: '\''/spec/replicas'\'' does not validate with https://raw.githubusercontent.com/yannh/kubernetes-json-schema/master/master-standalone/replicationcontroller-v1.json#/properties/spec/properties/replicas/type: expected integer or null, but got string' ]
[ "${lines[1]}" == 'Summary: 2 resources found in 1 file - Valid: 1, Invalid: 1, Errors: 0, Skipped: 0' ]
}
@test "Fail when parsing a List that contains an invalid resource from stdin" {
run bash -c "cat fixtures/list_invalid.yaml | bin/kubeconform -summary -"
[ "$status" -eq 1 ]
[ "${lines[0]}" == 'stdin - ReplicationController bob is invalid: problem validating schema. Check JSON formatting: jsonschema validation failed with '\''https://raw.githubusercontent.com/yannh/kubernetes-json-schema/master/master-standalone/replicationcontroller-v1.json#'\'' - at '\''/spec/replicas'\'': got string, want null or integer' ]
[ "${lines[0]}" == 'stdin - ReplicationController bob is invalid: problem validating schema. Check JSON formatting: jsonschema: '\''/spec/replicas'\'' does not validate with https://raw.githubusercontent.com/yannh/kubernetes-json-schema/master/master-standalone/replicationcontroller-v1.json#/properties/spec/properties/replicas/type: expected integer or null, but got string' ]
[ "${lines[1]}" == 'Summary: 2 resources found parsing stdin - Valid: 1, Invalid: 1, Errors: 0, Skipped: 0' ]
}
@@ -354,14 +340,3 @@ resetCacheFolder() {
run xmllint --noout --schema fixtures/junit.xsd output.xml
[ "$status" -eq 0 ]
}
@test "passes when trying to use a CRD that does not have the JSONSchema set" {
run bash -c "bin/kubeconform -schema-location default -schema-location 'https://raw.githubusercontent.com/datreeio/CRDs-catalog/main/{{.Group}}/{{.ResourceKind}}_{{.ResourceAPIVersion}}.json' fixtures/httpproxy.yaml"
[ "$status" -eq 0 ]
}
# https://github.com/yannh/kubeconform/pull/309
@test "passes when validating duration not in ISO8601" {
run bash -c "./bin/kubeconform -schema-location ./fixtures/grafanaalertrulegroup_v1beta1.json ./fixtures/grafana-alert-rule-group-sample.yaml"
[ "$status" -eq 0 ]
}

View file

@@ -46,8 +46,29 @@ func processResults(cancel context.CancelFunc, o output.Output, validationResult
return result
}
func kubeconform(cfg config.Config) int {
var err error
func realMain() int {
cfg, out, err := config.FromFlags(os.Args[0], os.Args[1:])
if out != "" {
o := os.Stderr
errCode := 1
if cfg.Help {
o = os.Stdout
errCode = 0
}
fmt.Fprintln(o, out)
return errCode
}
if cfg.Version {
fmt.Println(version)
return 0
}
if err != nil {
fmt.Fprintf(os.Stderr, "failed parsing command line: %s\n", err.Error())
return 1
}
cpuProfileFile := os.Getenv("KUBECONFORM_CPUPROFILE_FILE")
if cpuProfileFile != "" {
f, err := os.Create(cpuProfileFile)
@@ -73,7 +94,7 @@ func kubeconform(cfg config.Config) int {
}
var o output.Output
if o, err = output.New(os.Stdout, cfg.OutputFormat, cfg.Summary, useStdin, cfg.Verbose); err != nil {
if o, err = output.New(cfg.OutputFormat, cfg.Summary, useStdin, cfg.Verbose); err != nil {
fmt.Fprintln(os.Stderr, err)
return 1
}
@@ -84,7 +105,7 @@ func kubeconform(cfg config.Config) int {
SkipTLS: cfg.SkipTLS,
SkipKinds: cfg.SkipKinds,
RejectKinds: cfg.RejectKinds,
KubernetesVersion: cfg.KubernetesVersion.String(),
KubernetesVersion: cfg.KubernetesVersion,
Strict: cfg.Strict,
IgnoreMissingSchemas: cfg.IgnoreMissingSchemas,
})
@@ -157,27 +178,5 @@ func kubeconform(cfg config.Config) int {
}
func main() {
cfg, out, err := config.FromFlags(os.Args[0], os.Args[1:])
if out != "" {
o := os.Stderr
errCode := 1
if cfg.Help {
o = os.Stdout
errCode = 0
}
fmt.Fprintln(o, out)
os.Exit(errCode)
}
if cfg.Version {
fmt.Println(version)
return
}
if err != nil {
fmt.Fprintf(os.Stderr, "failed parsing command line: %s\n", err.Error())
os.Exit(1)
}
os.Exit(kubeconform(cfg))
os.Exit(realMain())
}

View file

@@ -1,46 +0,0 @@
{
"description": "ReplicationController represents the configuration of a replication controller.",
"properties": {
"apiVersion": {
"description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
"type": [
"string",
"null"
],
"enum": [
"v1"
]
},
"kind": {
"description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
"type": [
"string",
"null"
],
"enum": [
"ReplicationController"
]
},
"metadata": {
"$ref": "https://raw.githubusercontent.com/yannh/kubernetes-json-schema/master/master/_definitions.json#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta",
"description": "If the Labels of a ReplicationController are empty, they are defaulted to be the same as the Pod(s) that the replication controller manages. Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata"
},
"spec": {
"$ref": "https://raw.githubusercontent.com/yannh/kubernetes-json-schema/master/master/_definitions.json#/definitions/io.k8s.api.core.v1.ReplicationControllerSpec",
"description": "Spec defines the specification of the desired behavior of the replication controller. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status"
},
"status": {
"$ref": "https://raw.githubusercontent.com/yannh/kubernetes-json-schema/master/master/_definitions.json#/definitions/io.k8s.api.core.v1.ReplicationControllerStatus",
"description": "Status is the most recently observed status of the replication controller. This data may be out of date by some window of time. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status"
}
},
"type": "object",
"x-kubernetes-group-version-kind": [
{
"group": "",
"kind": "ReplicationController",
"version": "v1"
}
],
"$schema": "http://json-schema.org/schema#"
}

View file

@@ -1,62 +0,0 @@
---
apiVersion: grafana.integreatly.org/v1beta1
kind: GrafanaAlertRuleGroup
metadata:
name: grafanaalertrulegroup-sample
spec:
folderRef: test-folder
instanceSelector:
matchLabels:
dashboards: "grafana"
interval: 5m
rules:
- condition: B
data:
- datasourceUid: grafanacloud-demoinfra-prom
model:
datasource:
type: prometheus
uid: grafanacloud-demoinfra-prom
editorMode: code
expr: weather_temp_c{}
instant: true
intervalMs: 1000
legendFormat: __auto
maxDataPoints: 43200
range: false
refId: A
refId: A
relativeTimeRange:
from: 600
- datasourceUid: __expr__
model:
conditions:
- evaluator:
params:
- 0
type: lt
operator:
type: and
query:
params:
- C
reducer:
params: []
type: last
type: query
datasource:
type: __expr__
uid: __expr__
expression: A
intervalMs: 1000
maxDataPoints: 43200
refId: B
type: threshold
refId: B
relativeTimeRange:
from: 600
execErrState: Error
for: 5m0s
noDataState: NoData
title: Temperature below zero
uid: 4843de5c-4f8a-4af0-9509-23526a04faf8

View file

@@ -1,334 +0,0 @@
{
"description": "GrafanaAlertRuleGroup is the Schema for the grafanaalertrulegroups API",
"properties": {
"apiVersion": {
"description": "APIVersion defines the versioned schema of this representation of an object.\nServers should convert recognized schemas to the latest internal value, and\nmay reject unrecognized values.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
"type": "string"
},
"kind": {
"description": "Kind is a string value representing the REST resource this object represents.\nServers may infer this from the endpoint the client submits requests to.\nCannot be updated.\nIn CamelCase.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
"type": "string"
},
"metadata": {
"type": "object"
},
"spec": {
"description": "GrafanaAlertRuleGroupSpec defines the desired state of GrafanaAlertRuleGroup",
"properties": {
"allowCrossNamespaceImport": {
"type": "boolean"
},
"editable": {
"description": "Whether to enable or disable editing of the alert rule group in Grafana UI",
"type": "boolean",
"x-kubernetes-validations": [
{
"message": "Value is immutable",
"rule": "self == oldSelf"
}
]
},
"folderRef": {
"description": "Match GrafanaFolders CRs to infer the uid",
"type": "string"
},
"folderUID": {
"description": "UID of the folder containing this rule group\nOverrides the FolderSelector",
"type": "string"
},
"instanceSelector": {
"description": "selects Grafanas for import",
"properties": {
"matchExpressions": {
"description": "matchExpressions is a list of label selector requirements. The requirements are ANDed.",
"items": {
"description": "A label selector requirement is a selector that contains values, a key, and an operator that\nrelates the key and values.",
"properties": {
"key": {
"description": "key is the label key that the selector applies to.",
"type": "string"
},
"operator": {
"description": "operator represents a key's relationship to a set of values.\nValid operators are In, NotIn, Exists and DoesNotExist.",
"type": "string"
},
"values": {
"description": "values is an array of string values. If the operator is In or NotIn,\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\nthe values array must be empty. This array is replaced during a strategic\nmerge patch.",
"items": {
"type": "string"
},
"type": "array",
"x-kubernetes-list-type": "atomic"
}
},
"required": [
"key",
"operator"
],
"type": "object",
"additionalProperties": false
},
"type": "array",
"x-kubernetes-list-type": "atomic"
},
"matchLabels": {
"additionalProperties": {
"type": "string"
},
"description": "matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels\nmap is equivalent to an element of matchExpressions, whose key field is \"key\", the\noperator is \"In\", and the values array contains only \"value\". The requirements are ANDed.",
"type": "object"
}
},
"type": "object",
"x-kubernetes-map-type": "atomic",
"x-kubernetes-validations": [
{
"message": "Value is immutable",
"rule": "self == oldSelf"
}
],
"additionalProperties": false
},
"interval": {
"format": "duration",
"pattern": "^([0-9]+(\\.[0-9]+)?(ns|us|\u00b5s|ms|s|m|h))+$",
"type": "string"
},
"name": {
"description": "Name of the alert rule group. If not specified, the resource name will be used.",
"type": "string"
},
"resyncPeriod": {
"default": "10m",
"format": "duration",
"pattern": "^([0-9]+(\\.[0-9]+)?(ns|us|\u00b5s|ms|s|m|h))+$",
"type": "string"
},
"rules": {
"items": {
"description": "AlertRule defines a specific rule to be evaluated. It is based on the upstream model with some k8s specific type mappings",
"properties": {
"annotations": {
"additionalProperties": {
"type": "string"
},
"type": "object"
},
"condition": {
"type": "string"
},
"data": {
"items": {
"properties": {
"datasourceUid": {
"description": "Grafana data source unique identifier; it should be '__expr__' for a Server Side Expression operation.",
"type": "string"
},
"model": {
"description": "JSON is the raw JSON query and includes the above properties as well as custom properties.",
"x-kubernetes-preserve-unknown-fields": true
},
"queryType": {
"description": "QueryType is an optional identifier for the type of query.\nIt can be used to distinguish different types of queries.",
"type": "string"
},
"refId": {
"description": "RefID is the unique identifier of the query, set by the frontend call.",
"type": "string"
},
"relativeTimeRange": {
"description": "relative time range",
"properties": {
"from": {
"description": "from",
"format": "int64",
"type": "integer"
},
"to": {
"description": "to",
"format": "int64",
"type": "integer"
}
},
"type": "object",
"additionalProperties": false
}
},
"type": "object",
"additionalProperties": false
},
"type": "array"
},
"execErrState": {
"enum": [
"OK",
"Alerting",
"Error",
"KeepLast"
],
"type": "string"
},
"for": {
"format": "duration",
"pattern": "^([0-9]+(\\.[0-9]+)?(ns|us|\u00b5s|ms|s|m|h))+$",
"type": "string"
},
"isPaused": {
"type": "boolean"
},
"labels": {
"additionalProperties": {
"type": "string"
},
"type": "object"
},
"noDataState": {
"enum": [
"Alerting",
"NoData",
"OK",
"KeepLast"
],
"type": "string"
},
"notificationSettings": {
"properties": {
"group_by": {
"items": {
"type": "string"
},
"type": "array"
},
"group_interval": {
"type": "string"
},
"group_wait": {
"type": "string"
},
"mute_time_intervals": {
"items": {
"type": "string"
},
"type": "array"
},
"receiver": {
"type": "string"
},
"repeat_interval": {
"type": "string"
}
},
"required": [
"receiver"
],
"type": "object",
"additionalProperties": false
},
"title": {
"example": "Always firing",
"maxLength": 190,
"minLength": 1,
"type": "string"
},
"uid": {
"pattern": "^[a-zA-Z0-9-_]+$",
"type": "string"
}
},
"required": [
"condition",
"data",
"execErrState",
"for",
"noDataState",
"title",
"uid"
],
"type": "object",
"additionalProperties": false
},
"type": "array"
}
},
"required": [
"instanceSelector",
"interval",
"rules"
],
"type": "object",
"x-kubernetes-validations": [
{
"message": "Only one of FolderUID or FolderRef can be set",
"rule": "(has(self.folderUID) && !(has(self.folderRef))) || (has(self.folderRef) && !(has(self.folderUID)))"
}
],
"additionalProperties": false
},
"status": {
"description": "GrafanaAlertRuleGroupStatus defines the observed state of GrafanaAlertRuleGroup",
"properties": {
"conditions": {
"items": {
"description": "Condition contains details for one aspect of the current state of this API Resource.",
"properties": {
"lastTransitionTime": {
"description": "lastTransitionTime is the last time the condition transitioned from one status to another.\nThis should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable.",
"format": "date-time",
"type": "string"
},
"message": {
"description": "message is a human readable message indicating details about the transition.\nThis may be an empty string.",
"maxLength": 32768,
"type": "string"
},
"observedGeneration": {
"description": "observedGeneration represents the .metadata.generation that the condition was set based upon.\nFor instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date\nwith respect to the current state of the instance.",
"format": "int64",
"minimum": 0,
"type": "integer"
},
"reason": {
"description": "reason contains a programmatic identifier indicating the reason for the condition's last transition.\nProducers of specific condition types may define expected values and meanings for this field,\nand whether the values are considered a guaranteed API.\nThe value should be a CamelCase string.\nThis field may not be empty.",
"maxLength": 1024,
"minLength": 1,
"pattern": "^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$",
"type": "string"
},
"status": {
"description": "status of the condition, one of True, False, Unknown.",
"enum": [
"True",
"False",
"Unknown"
],
"type": "string"
},
"type": {
"description": "type of condition in CamelCase or in foo.example.com/CamelCase.",
"maxLength": 316,
"pattern": "^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$",
"type": "string"
}
},
"required": [
"lastTransitionTime",
"message",
"reason",
"status",
"type"
],
"type": "object",
"additionalProperties": false
},
"type": "array"
}
},
"required": [
"conditions"
],
"type": "object",
"additionalProperties": false
}
},
"type": "object"
}

View file

@ -1,13 +0,0 @@
apiVersion: projectcontour.io/v1
kind: HTTPProxy
metadata:
name: basic
spec:
virtualhost:
fqdn: foo-basic.example.com
routes:
- conditions:
- prefix: /
services:
- name: s1
port: 80

View file

@ -1,34 +1,46 @@
{
"apiVersion": "v1",
"kind": "ReplicationController",
"metadata": {
"name": "bob"
},
"spec": {
"replicas": 2,
"selector": {
"app": "nginx"
},
"template": {
"metadata": {
"name": "nginx",
"labels": {
"app": "nginx"
}
},
"spec": {
"containers": [
{
"name": "nginx",
"image": "nginx",
"ports": [
{
"containerPort": 80
}
]
}
]
"apiVersion": "apps/v1beta1",
"kind": "Deployment",
"metadata": {
"name": "nginx-deployment",
"namespace": "default"
},
"spec": {
"replicas": 2,
"template": {
"spec": {
"affinity": { },
"containers": [
{
"args": [ ],
"command": [ ],
"env": [ ],
"envFrom": [ ],
"image": "nginx:1.7.9",
"lifecycle": { },
"livenessProbe": { },
"name": "nginx",
"ports": [
{
"containerPort": 80,
"name": "http"
}
],
"readinessProbe": { },
"resources": { },
"securityContext": { },
"volumeMounts": [ ]
}
],
"hostMappings": [ ],
"imagePullSecrets": [ ],
"initContainers": [ ],
"nodeSelector": { },
"securityContext": { },
"tolerations": [ ],
"volumes": [ ]
}
}
}
}
},
"status": { }
}

10
go.mod
View file

@ -1,12 +1,10 @@
module github.com/yannh/kubeconform
go 1.24
go 1.17
require (
github.com/hashicorp/go-retryablehttp v0.7.7
github.com/santhosh-tekuri/jsonschema/v6 v6.0.1
golang.org/x/text v0.25.0
sigs.k8s.io/yaml v1.4.0
github.com/santhosh-tekuri/jsonschema/v5 v5.1.1
sigs.k8s.io/yaml v1.2.0
)
require github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
require gopkg.in/yaml.v2 v2.4.0 // indirect

33
go.sum
View file

@ -1,26 +1,11 @@
github.com/dlclark/regexp2 v1.11.0 h1:G/nrcoOa7ZXlpoa/91N3X7mM3r8eIlMBBJZvsz/mxKI=
github.com/dlclark/regexp2 v1.11.0/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8=
github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM=
github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE=
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ=
github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48=
github.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB11/k=
github.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M=
github.com/hashicorp/go-retryablehttp v0.7.7 h1:C8hUCYzor8PIfXHa4UrZkU4VvK8o9ISHxT2Q8+VepXU=
github.com/hashicorp/go-retryablehttp v0.7.7/go.mod h1:pkQpWZeYWskR+D1tR2O5OcBFOxfA7DoAO6xtkuQnHTk=
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/santhosh-tekuri/jsonschema/v6 v6.0.1 h1:PKK9DyHxif4LZo+uQSgXNqs0jj5+xZwwfKHgph2lxBw=
github.com/santhosh-tekuri/jsonschema/v6 v6.0.1/go.mod h1:JXeL+ps8p7/KNMjDQk3TCwPpBy0wYklyWTfbkIzdIFU=
golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y=
golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/text v0.25.0 h1:qVyWApTSYLk/drJRO5mDlNYskwQznZmkpV2c8q9zls4=
golang.org/x/text v0.25.0/go.mod h1:WEdwpYrmk1qmdHvhkSTNPm3app7v4rsT8F2UD6+VHIA=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/santhosh-tekuri/jsonschema/v5 v5.1.1 h1:lEOLY2vyGIqKWUI9nzsOJRV3mb3WC9dXYORsLEUcoeY=
github.com/santhosh-tekuri/jsonschema/v5 v5.1.1/go.mod h1:FKdcjfQW6rpZSnxxUvEA5H/cDPdvJ/SZJQLWWXWGrZ0=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=
sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q=
sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=

4
pkg/cache/cache.go vendored
View file

@ -1,6 +1,6 @@
package cache
type Cache interface {
Get(key string) (any, error)
Set(key string, schema any) error
Get(resourceKind, resourceAPIVersion, k8sVersion string) (interface{}, error)
Set(resourceKind, resourceAPIVersion, k8sVersion string, schema interface{}) error
}

18
pkg/cache/inmemory.go vendored
View file

@ -10,21 +10,26 @@ import (
// - This cache caches the parsed Schemas
type inMemory struct {
sync.RWMutex
schemas map[string]any
schemas map[string]interface{}
}
// New creates a new cache for downloaded schemas
func NewInMemoryCache() Cache {
return &inMemory{
schemas: make(map[string]any),
schemas: map[string]interface{}{},
}
}
func key(resourceKind, resourceAPIVersion, k8sVersion string) string {
return fmt.Sprintf("%s-%s-%s", resourceKind, resourceAPIVersion, k8sVersion)
}
// Get retrieves the JSON schema given a resource signature
func (c *inMemory) Get(key string) (any, error) {
func (c *inMemory) Get(resourceKind, resourceAPIVersion, k8sVersion string) (interface{}, error) {
k := key(resourceKind, resourceAPIVersion, k8sVersion)
c.RLock()
defer c.RUnlock()
schema, ok := c.schemas[key]
schema, ok := c.schemas[k]
if !ok {
return nil, fmt.Errorf("schema not found in in-memory cache")
@ -34,10 +39,11 @@ func (c *inMemory) Get(key string) (any, error) {
}
// Set adds a JSON schema to the schema cache
func (c *inMemory) Set(key string, schema any) error {
func (c *inMemory) Set(resourceKind, resourceAPIVersion, k8sVersion string, schema interface{}) error {
k := key(resourceKind, resourceAPIVersion, k8sVersion)
c.Lock()
defer c.Unlock()
c.schemas[key] = schema
c.schemas[k] = schema
return nil
}

22
pkg/cache/ondisk.go vendored
View file

@ -3,7 +3,8 @@ package cache
import (
"crypto/sha256"
"encoding/hex"
"io"
"fmt"
"io/ioutil"
"os"
"path"
"sync"
@ -21,32 +22,27 @@ func NewOnDiskCache(cache string) Cache {
}
}
func cachePath(folder, key string) string {
hash := sha256.Sum256([]byte(key))
func cachePath(folder, resourceKind, resourceAPIVersion, k8sVersion string) string {
hash := sha256.Sum256([]byte(fmt.Sprintf("%s-%s-%s", resourceKind, resourceAPIVersion, k8sVersion)))
return path.Join(folder, hex.EncodeToString(hash[:]))
}
// Get retrieves the JSON schema given a resource signature
func (c *onDisk) Get(key string) (any, error) {
func (c *onDisk) Get(resourceKind, resourceAPIVersion, k8sVersion string) (interface{}, error) {
c.RLock()
defer c.RUnlock()
f, err := os.Open(cachePath(c.folder, key))
f, err := os.Open(cachePath(c.folder, resourceKind, resourceAPIVersion, k8sVersion))
if err != nil {
return nil, err
}
defer f.Close()
return io.ReadAll(f)
return ioutil.ReadAll(f)
}
// Set adds a JSON schema to the schema cache
func (c *onDisk) Set(key string, schema any) error {
func (c *onDisk) Set(resourceKind, resourceAPIVersion, k8sVersion string, schema interface{}) error {
c.Lock()
defer c.Unlock()
if _, err := os.Stat(cachePath(c.folder, key)); os.IsNotExist(err) {
return os.WriteFile(cachePath(c.folder, key), schema.([]byte), 0644)
}
return nil
return ioutil.WriteFile(cachePath(c.folder, resourceKind, resourceAPIVersion, k8sVersion), schema.([]byte), 0644)
}

View file

@ -4,29 +4,28 @@ import (
"bytes"
"flag"
"fmt"
"regexp"
"strings"
)
type Config struct {
Cache string `yaml:"cache" json:"cache"`
Debug bool `yaml:"debug" json:"debug"`
ExitOnError bool `yaml:"exitOnError" json:"exitOnError"`
Files []string `yaml:"files" json:"files"`
Help bool `yaml:"help" json:"help"`
IgnoreFilenamePatterns []string `yaml:"ignoreFilenamePatterns" json:"ignoreFilenamePatterns"`
IgnoreMissingSchemas bool `yaml:"ignoreMissingSchemas" json:"ignoreMissingSchemas"`
KubernetesVersion k8sVersionValue `yaml:"kubernetesVersion" json:"kubernetesVersion"`
NumberOfWorkers int `yaml:"numberOfWorkers" json:"numberOfWorkers"`
OutputFormat string `yaml:"output" json:"output"`
RejectKinds map[string]struct{} `yaml:"reject" json:"reject"`
SchemaLocations []string `yaml:"schemaLocations" json:"schemaLocations"`
SkipKinds map[string]struct{} `yaml:"skip" json:"skip"`
SkipTLS bool `yaml:"insecureSkipTLSVerify" json:"insecureSkipTLSVerify"`
Strict bool `yaml:"strict" json:"strict"`
Summary bool `yaml:"summary" json:"summary"`
Verbose bool `yaml:"verbose" json:"verbose"`
Version bool `yaml:"version" json:"version"`
Cache string
Debug bool
ExitOnError bool
Files []string
SchemaLocations []string
SkipTLS bool
SkipKinds map[string]struct{}
RejectKinds map[string]struct{}
OutputFormat string
KubernetesVersion string
NumberOfWorkers int
Summary bool
Strict bool
Verbose bool
IgnoreMissingSchemas bool
IgnoreFilenamePatterns []string
Help bool
Version bool
}
type arrayParam []string
@ -40,30 +39,11 @@ func (ap *arrayParam) Set(value string) error {
return nil
}
type k8sVersionValue string
func (kv *k8sVersionValue) String() string {
return string(*kv)
}
func (kv k8sVersionValue) MarshalText() ([]byte, error) {
return []byte(kv), nil
}
func (kv *k8sVersionValue) UnmarshalText(v []byte) error {
if ok, _ := regexp.MatchString(`^(master|\d+\.\d+\.\d+)$`, string(v)); ok != true {
return fmt.Errorf("%v is not a valid version. Valid values are \"master\" (default) or full version x.y.z (e.g. \"1.27.2\")", string(v))
}
*kv = k8sVersionValue(v)
return nil
}
func splitCSV(csvStr string) map[string]struct{} {
splitValues := strings.Split(csvStr, ",")
valuesMap := map[string]struct{}{}
for _, kind := range splitValues {
kind = strings.TrimSpace(kind)
if len(kind) > 0 {
valuesMap[kind] = struct{}{}
}
@ -83,7 +63,7 @@ func FromFlags(progName string, args []string) (Config, string, error) {
c := Config{}
c.Files = []string{}
flags.TextVar(&c.KubernetesVersion, "kubernetes-version", k8sVersionValue("master"), "version of Kubernetes to validate against, e.g.: 1.18.0")
flags.StringVar(&c.KubernetesVersion, "kubernetes-version", "master", "version of Kubernetes to validate against, e.g.: 1.18.0")
flags.Var(&schemaLocationsParam, "schema-location", "override schemas location search path (can be specified multiple times)")
flags.StringVar(&skipKindsCSV, "skip", "", "comma-separated list of kinds or GVKs to ignore")
flags.StringVar(&rejectKindsCSV, "reject", "", "comma-separated list of kinds or GVKs to reject")
@ -94,7 +74,7 @@ func FromFlags(progName string, args []string) (Config, string, error) {
flags.BoolVar(&c.Summary, "summary", false, "print a summary at the end (ignored for junit output)")
flags.IntVar(&c.NumberOfWorkers, "n", 4, "number of goroutines to run concurrently")
flags.BoolVar(&c.Strict, "strict", false, "disallow additional properties not in schema or duplicated keys")
flags.StringVar(&c.OutputFormat, "output", "text", "output format - json, junit, pretty, tap, text")
flags.StringVar(&c.OutputFormat, "output", "text", "output format - json, junit, tap, text")
flags.BoolVar(&c.Verbose, "verbose", false, "print results for all resources (ignored for tap and junit output)")
flags.BoolVar(&c.SkipTLS, "insecure-skip-tls-verify", false, "disable verification of the server's SSL certificate. This will make your HTTPS connections insecure")
flags.StringVar(&c.Cache, "cache", "", "cache schemas downloaded via HTTP to this folder")

View file

@ -95,30 +95,6 @@ func TestFromFlags(t *testing.T) {
RejectKinds: map[string]struct{}{},
},
},
{
[]string{"-skip", "a, b, c"},
Config{
Files: []string{},
KubernetesVersion: "master",
NumberOfWorkers: 4,
OutputFormat: "text",
SchemaLocations: nil,
SkipKinds: map[string]struct{}{"a": {}, "b": {}, "c": {}},
RejectKinds: map[string]struct{}{},
},
},
{
[]string{"-skip", "a,b, c"},
Config{
Files: []string{},
KubernetesVersion: "master",
NumberOfWorkers: 4,
OutputFormat: "text",
SchemaLocations: nil,
SkipKinds: map[string]struct{}{"a": {}, "b": {}, "c": {}},
RejectKinds: map[string]struct{}{},
},
},
{
[]string{"-summary", "-verbose", "file1", "file2"},
Config{

View file

@ -1,65 +0,0 @@
package loader
import (
"bytes"
"errors"
"fmt"
"github.com/santhosh-tekuri/jsonschema/v6"
"github.com/yannh/kubeconform/pkg/cache"
"io"
gourl "net/url"
"os"
"path/filepath"
"runtime"
"strings"
)
// FileLoader loads json file url.
type FileLoader struct {
cache cache.Cache
}
func (l FileLoader) Load(url string) (any, error) {
path, err := l.ToFile(url)
if err != nil {
return nil, err
}
f, err := os.Open(path)
if err != nil {
if os.IsNotExist(err) {
msg := fmt.Sprintf("could not open file %s", path)
return nil, NewNotFoundError(errors.New(msg))
}
return nil, err
}
defer f.Close()
content, err := io.ReadAll(f)
if err != nil {
return nil, err
}
return jsonschema.UnmarshalJSON(bytes.NewReader(content))
}
// ToFile is helper method to convert file url to file path.
func (l FileLoader) ToFile(url string) (string, error) {
u, err := gourl.Parse(url)
if err != nil {
return "", err
}
if u.Scheme != "file" {
return url, nil
}
path := u.Path
if runtime.GOOS == "windows" {
path = strings.TrimPrefix(path, "/")
path = filepath.FromSlash(path)
}
return path, nil
}
func NewFileLoader() *FileLoader {
return &FileLoader{}
}

View file

@ -1,85 +0,0 @@
package loader
import (
"bytes"
"crypto/tls"
"errors"
"fmt"
"github.com/hashicorp/go-retryablehttp"
"github.com/santhosh-tekuri/jsonschema/v6"
"github.com/yannh/kubeconform/pkg/cache"
"io"
"net/http"
"time"
)
type HTTPURLLoader struct {
client http.Client
cache cache.Cache
}
func (l *HTTPURLLoader) Load(url string) (any, error) {
if l.cache != nil {
if cached, err := l.cache.Get(url); err == nil {
return jsonschema.UnmarshalJSON(bytes.NewReader(cached.([]byte)))
}
}
resp, err := l.client.Get(url)
if err != nil {
msg := fmt.Sprintf("failed downloading schema at %s: %s", url, err)
return nil, errors.New(msg)
}
defer resp.Body.Close()
if resp.StatusCode == http.StatusNotFound {
msg := fmt.Sprintf("could not find schema at %s", url)
return nil, NewNotFoundError(errors.New(msg))
}
if resp.StatusCode != http.StatusOK {
msg := fmt.Sprintf("error while downloading schema at %s - received HTTP status %d", url, resp.StatusCode)
return nil, fmt.Errorf("%s", msg)
}
body, err := io.ReadAll(resp.Body)
if err != nil {
msg := fmt.Sprintf("failed parsing schema from %s: %s", url, err)
return nil, errors.New(msg)
}
if l.cache != nil {
if err = l.cache.Set(url, body); err != nil {
return nil, fmt.Errorf("failed to write cache to disk: %s", err)
}
}
s, err := jsonschema.UnmarshalJSON(bytes.NewReader(body))
if err != nil {
return nil, NewNonJSONResponseError(err)
}
return s, nil
}
func NewHTTPURLLoader(skipTLS bool, cache cache.Cache) (*HTTPURLLoader, error) {
transport := &http.Transport{
MaxIdleConns: 100,
IdleConnTimeout: 3 * time.Second,
DisableCompression: true,
Proxy: http.ProxyFromEnvironment,
}
if skipTLS {
transport.TLSClientConfig = &tls.Config{InsecureSkipVerify: true}
}
// retriable http client
retryClient := retryablehttp.NewClient()
retryClient.RetryMax = 2
retryClient.HTTPClient = &http.Client{Transport: transport}
retryClient.Logger = nil
httpLoader := HTTPURLLoader{client: *retryClient.StandardClient(), cache: cache}
return &httpLoader, nil
}

View file

@ -1,216 +0,0 @@
package loader
import (
"errors"
"fmt"
"net/http"
"net/http/httptest"
"sync"
"testing"
)
type mockCache struct {
data map[string]any
}
func (m *mockCache) Get(key string) (any, error) {
if val, ok := m.data[key]; ok {
return val, nil
}
return nil, errors.New("cache miss")
}
func (m *mockCache) Set(key string, value any) error {
m.data[key] = value
return nil
}
// Test basic functionality of HTTPURLLoader
func TestHTTPURLLoader_Load(t *testing.T) {
tests := []struct {
name string
mockResponse string
mockStatusCode int
cacheEnabled bool
expectError bool
expectCacheHit bool
}{
{
name: "successful load",
mockResponse: `{"type": "object"}`,
mockStatusCode: http.StatusOK,
cacheEnabled: false,
expectError: false,
},
{
name: "not found error",
mockResponse: "",
mockStatusCode: http.StatusNotFound,
cacheEnabled: false,
expectError: true,
},
{
name: "server error",
mockResponse: "",
mockStatusCode: http.StatusInternalServerError,
cacheEnabled: false,
expectError: true,
},
{
name: "cache hit",
mockResponse: `{"type": "object"}`,
mockStatusCode: http.StatusOK,
cacheEnabled: true,
expectError: false,
expectCacheHit: true,
},
{
name: "Partial response from server",
mockResponse: `{"type": "objec`,
mockStatusCode: http.StatusOK,
cacheEnabled: false,
expectError: true,
expectCacheHit: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
// Mock HTTP server
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(tt.mockStatusCode)
w.Write([]byte(tt.mockResponse))
}))
defer server.Close()
// Create HTTPURLLoader
loader := &HTTPURLLoader{
client: *server.Client(),
cache: nil,
}
if tt.cacheEnabled {
loader.cache = &mockCache{data: map[string]any{}}
if tt.expectCacheHit {
loader.cache.Set(server.URL, []byte(tt.mockResponse))
}
}
// Call Load and handle errors
res, err := loader.Load(server.URL)
if tt.expectError {
if err == nil {
t.Errorf("expected error, got nil")
}
} else {
if err != nil {
t.Errorf("unexpected error: %v", err)
}
if res == nil {
t.Errorf("expected non-nil result, got nil")
}
}
})
}
}
// Test basic functionality of HTTPURLLoader
func TestHTTPURLLoader_Load_Retries(t *testing.T) {
tests := []struct {
name string
url string
expectError bool
expectCallCount int
consecutiveFailures int
}{
{
name: "retries on 503",
url: "/503",
expectError: false,
expectCallCount: 2,
consecutiveFailures: 2,
},
{
name: "fails when hitting max retries",
url: "/503",
expectError: true,
expectCallCount: 3,
consecutiveFailures: 5,
},
{
name: "retry on connection reset",
url: "/simulate-reset",
expectError: false,
expectCallCount: 2,
consecutiveFailures: 1,
},
{
name: "retry on connection reset",
url: "/simulate-reset",
expectError: true,
expectCallCount: 3,
consecutiveFailures: 5,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
ccMutex := &sync.Mutex{}
callCounts := map[string]int{}
// Mock HTTP server
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
ccMutex.Lock()
callCounts[r.URL.Path]++
callCount := callCounts[r.URL.Path]
ccMutex.Unlock()
switch r.URL.Path {
case "/simulate-reset":
if callCount <= tt.consecutiveFailures {
if hj, ok := w.(http.Hijacker); ok {
conn, _, err := hj.Hijack()
if err != nil {
fmt.Printf("Hijacking failed: %v\n", err)
return
}
conn.Close() // Close the connection to simulate a reset
}
return
}
w.WriteHeader(http.StatusOK)
w.Write([]byte(`{"type": "object"}`))
case "/503":
s := http.StatusServiceUnavailable
if callCount >= tt.consecutiveFailures {
s = http.StatusOK
}
w.WriteHeader(s)
w.Write([]byte(`{"type": "object"}`))
}
}))
defer server.Close()
// Create HTTPURLLoader
loader, _ := NewHTTPURLLoader(false, nil)
fullurl := server.URL + tt.url
// Call Load and handle errors
_, err := loader.Load(fullurl)
if tt.expectError && err == nil {
t.Error("expected error, got none")
}
if !tt.expectError && err != nil {
t.Errorf("expected no error, got %v", err)
}
ccMutex.Lock()
if callCounts[tt.url] != tt.expectCallCount {
t.Errorf("expected %d calls, got: %d", tt.expectCallCount, callCounts[tt.url])
}
ccMutex.Unlock()
})
}
}

View file

@ -1,22 +0,0 @@
package loader
// NotFoundError is returned when the registry does not contain a schema for the resource
type NotFoundError struct {
err error
}
func NewNotFoundError(err error) *NotFoundError {
return &NotFoundError{err}
}
func (e *NotFoundError) Error() string { return e.err.Error() }
func (e *NotFoundError) Retryable() bool { return false }
type NonJSONResponseError struct {
err error
}
func NewNonJSONResponseError(err error) *NotFoundError {
return &NotFoundError{err}
}
func (e *NonJSONResponseError) Error() string { return e.err.Error() }
func (e *NonJSONResponseError) Retryable() bool { return false }

View file

@ -65,13 +65,13 @@ type TestCaseError struct {
}
type junito struct {
id int
w io.Writer
withSummary bool
verbose bool
suitesIndex map[string]int // map filename to index in suites
suites []TestSuite
startTime time.Time
id int
w io.Writer
withSummary bool
verbose bool
suites map[string]*TestSuite // map filename to corresponding suite
nValid, nInvalid, nErrors, nSkipped int
startTime time.Time
}
func junitOutput(w io.Writer, withSummary bool, isStdin, verbose bool) Output {
@ -80,28 +80,29 @@ func junitOutput(w io.Writer, withSummary bool, isStdin, verbose bool) Output {
w: w,
withSummary: withSummary,
verbose: verbose,
suites: []TestSuite{},
suitesIndex: make(map[string]int),
suites: make(map[string]*TestSuite),
nValid: 0,
nInvalid: 0,
nErrors: 0,
nSkipped: 0,
startTime: time.Now(),
}
}
// Write adds a result to the report.
func (o *junito) Write(result validator.Result) error {
var suite TestSuite
i, found := o.suitesIndex[result.Resource.Path]
var suite *TestSuite
suite, found := o.suites[result.Resource.Path]
if !found {
o.id++
suite = TestSuite{
suite = &TestSuite{
Name: result.Resource.Path,
Id: o.id,
Tests: 0, Failures: 0, Errors: 0, Disabled: 0, Skipped: 0,
Cases: make([]TestCase, 0),
}
o.suites = append(o.suites, suite)
i = len(o.suites) - 1
o.suitesIndex[result.Resource.Path] = i
o.suites[result.Resource.Path] = suite
}
sig, _ := result.Resource.Signature()
@ -116,22 +117,23 @@ func (o *junito) Write(result validator.Result) error {
switch result.Status {
case validator.Valid:
o.nValid++
case validator.Invalid:
o.suites[i].Failures++
o.nInvalid++
failure := TestCaseError{Message: result.Err.Error()}
testCase.Failure = append(testCase.Failure, failure)
case validator.Error:
o.suites[i].Errors++
o.nErrors++
testCase.Error = &TestCaseError{Message: result.Err.Error()}
case validator.Skipped:
testCase.Skipped = &TestCaseSkipped{}
o.suites[i].Skipped++
o.nSkipped++
case validator.Empty:
return nil
}
o.suites[i].Tests++
o.suites[i].Cases = append(o.suites[i].Cases, testCase)
suite.Tests++
suite.Cases = append(suite.Cases, testCase)
return nil
}
@ -140,33 +142,19 @@ func (o *junito) Write(result validator.Result) error {
func (o *junito) Flush() error {
runtime := time.Now().Sub(o.startTime)
totalValid := 0
totalInvalid := 0
totalErrors := 0
totalSkipped := 0
var suites = make([]TestSuite, 0)
for _, suite := range o.suites {
for _, tCase := range suite.Cases {
if tCase.Error != nil {
totalErrors++
} else if tCase.Skipped != nil {
totalSkipped++
} else if len(tCase.Failure) > 0 {
totalInvalid++
} else {
totalValid++
}
}
suites = append(suites, *suite)
}
root := TestSuiteCollection{
Name: "kubeconform",
Time: runtime.Seconds(),
Tests: totalValid + totalInvalid + totalErrors + totalSkipped,
Failures: totalInvalid,
Errors: totalErrors,
Disabled: totalSkipped,
Suites: o.suites,
Tests: o.nValid + o.nInvalid + o.nErrors + o.nSkipped,
Failures: o.nInvalid,
Errors: o.nErrors,
Disabled: o.nSkipped,
Suites: suites,
}
// 2-space indentation

View file

@ -2,7 +2,6 @@ package output
import (
"bytes"
"fmt"
"regexp"
"testing"
@ -86,67 +85,6 @@ metadata:
" </testsuite>\n" +
"</testsuites>\n",
},
{
"one error, one invalid",
true,
false,
false,
[]validator.Result{
{
Resource: resource.Resource{
Path: "deployment.yml",
Bytes: []byte(`apiVersion: apps/v1
kind: Deployment
metadata:
name: "my-app"
`),
},
Status: validator.Error,
Err: fmt.Errorf("error validating deployment.yml"),
},
{
Resource: resource.Resource{
Path: "deployment2.yml",
Bytes: []byte(`apiVersion: apps/v1
kind: Deployment
metadata:
name: "my-app"
`),
},
Status: validator.Error,
Err: fmt.Errorf("error validating deployment.yml"),
},
{
Resource: resource.Resource{
Path: "deployment3.yml",
Bytes: []byte(`apiVersion: apps/v1
kind: Deployment
metadata:
name: "my-app"
`),
},
Status: validator.Invalid,
Err: fmt.Errorf("deployment3.yml is invalid"),
},
},
"<testsuites name=\"kubeconform\" time=\"\" tests=\"3\" failures=\"1\" disabled=\"0\" errors=\"2\">\n" +
" <testsuite name=\"deployment.yml\" id=\"1\" tests=\"1\" failures=\"0\" errors=\"1\" disabled=\"0\" skipped=\"0\">\n" +
" <testcase name=\"my-app\" classname=\"Deployment@apps/v1\" time=\"\">\n" +
" <error message=\"error validating deployment.yml\" type=\"\"></error>\n" +
" </testcase>\n" +
" </testsuite>\n" +
" <testsuite name=\"deployment2.yml\" id=\"2\" tests=\"1\" failures=\"0\" errors=\"1\" disabled=\"0\" skipped=\"0\">\n" +
" <testcase name=\"my-app\" classname=\"Deployment@apps/v1\" time=\"\">\n" +
" <error message=\"error validating deployment.yml\" type=\"\"></error>\n" +
" </testcase>\n" +
" </testsuite>\n" +
" <testsuite name=\"deployment3.yml\" id=\"3\" tests=\"1\" failures=\"1\" errors=\"0\" disabled=\"0\" skipped=\"0\">\n" +
" <testcase name=\"my-app\" classname=\"Deployment@apps/v1\" time=\"\">\n" +
" <failure message=\"deployment3.yml is invalid\" type=\"\"></failure>\n" +
" </testcase>\n" +
" </testsuite>\n" +
"</testsuites>\n",
},
} {
w := new(bytes.Buffer)
o := junitOutput(w, testCase.withSummary, testCase.isStdin, testCase.verbose)

View file

@ -2,7 +2,7 @@ package output
import (
"fmt"
"io"
"os"
"github.com/yannh/kubeconform/pkg/validator"
)
@ -12,19 +12,19 @@ type Output interface {
Flush() error
}
func New(w io.Writer, outputFormat string, printSummary, isStdin, verbose bool) (Output, error) {
func New(outputFormat string, printSummary, isStdin, verbose bool) (Output, error) {
w := os.Stdout
switch {
case outputFormat == "json":
return jsonOutput(w, printSummary, isStdin, verbose), nil
case outputFormat == "junit":
return junitOutput(w, printSummary, isStdin, verbose), nil
case outputFormat == "pretty":
return prettyOutput(w, printSummary, isStdin, verbose), nil
case outputFormat == "tap":
return tapOutput(w, printSummary, isStdin, verbose), nil
case outputFormat == "text":
return textOutput(w, printSummary, isStdin, verbose), nil
default:
return nil, fmt.Errorf("'outputFormat' must be 'json', 'junit', 'pretty', 'tap' or 'text'")
return nil, fmt.Errorf("`outputFormat` must be 'json', 'tap' or 'text'")
}
}

View file

@ -1,109 +0,0 @@
package output
import (
"fmt"
"io"
"sync"
"github.com/yannh/kubeconform/pkg/validator"
)
type prettyo struct {
sync.Mutex
w io.Writer
withSummary bool
isStdin bool
verbose bool
files map[string]bool
nValid, nInvalid, nErrors, nSkipped int
}
// Text will output the results of the validation as a texto
func prettyOutput(w io.Writer, withSummary, isStdin, verbose bool) Output {
return &prettyo{
w: w,
withSummary: withSummary,
isStdin: isStdin,
verbose: verbose,
files: map[string]bool{},
nValid: 0,
nInvalid: 0,
nErrors: 0,
nSkipped: 0,
}
}
func (o *prettyo) Write(result validator.Result) error {
checkmark := "\u2714"
multiplicationSign := "\u2716"
reset := "\033[0m"
cRed := "\033[31m"
cGreen := "\033[32m"
cYellow := "\033[33m"
o.Lock()
defer o.Unlock()
var err error
sig, _ := result.Resource.Signature()
o.files[result.Resource.Path] = true
switch result.Status {
case validator.Valid:
if o.verbose {
fmt.Fprintf(o.w, "%s%s%s %s: %s%s %s is valid%s\n", cGreen, checkmark, reset, result.Resource.Path, cGreen, sig.Kind, sig.Name, reset)
}
o.nValid++
case validator.Invalid:
fmt.Fprintf(o.w, "%s%s%s %s: %s%s %s is invalid: %s%s\n", cRed, multiplicationSign, reset, result.Resource.Path, cRed, sig.Kind, sig.Name, result.Err.Error(), reset)
o.nInvalid++
case validator.Error:
fmt.Fprintf(o.w, "%s%s%s %s: ", cRed, multiplicationSign, reset, result.Resource.Path)
if sig.Kind != "" && sig.Name != "" {
fmt.Fprintf(o.w, "%s%s failed validation: %s %s%s\n", cRed, sig.Kind, sig.Name, result.Err.Error(), reset)
} else {
fmt.Fprintf(o.w, "%sfailed validation: %s %s%s\n", cRed, sig.Name, result.Err.Error(), reset)
}
o.nErrors++
case validator.Skipped:
if o.verbose {
fmt.Fprintf(o.w, "%s-%s %s: ", cYellow, reset, result.Resource.Path)
if sig.Kind != "" && sig.Name != "" {
fmt.Fprintf(o.w, "%s%s %s skipped%s\n", cYellow, sig.Kind, sig.Name, reset)
} else if sig.Kind != "" {
fmt.Fprintf(o.w, "%s%s skipped%s\n", cYellow, sig.Kind, reset)
} else {
fmt.Fprintf(o.w, "%sskipped%s\n", cYellow, reset)
}
}
o.nSkipped++
case validator.Empty: // sent to ensure we count the filename as parsed
}
return err
}
func (o *prettyo) Flush() error {
var err error
if o.withSummary {
nFiles := len(o.files)
nResources := o.nValid + o.nInvalid + o.nErrors + o.nSkipped
resourcesPlural := ""
if nResources > 1 {
resourcesPlural = "s"
}
filesPlural := ""
if nFiles > 1 {
filesPlural = "s"
}
if o.isStdin {
_, err = fmt.Fprintf(o.w, "Summary: %d resource%s found parsing stdin - Valid: %d, Invalid: %d, Errors: %d, Skipped: %d\n", nResources, resourcesPlural, o.nValid, o.nInvalid, o.nErrors, o.nSkipped)
} else {
_, err = fmt.Fprintf(o.w, "Summary: %d resource%s found in %d file%s - Valid: %d, Invalid: %d, Errors: %d, Skipped: %d\n", nResources, resourcesPlural, nFiles, filesPlural, o.nValid, o.nInvalid, o.nErrors, o.nSkipped)
}
}
return err
}

View file

@ -1,84 +0,0 @@
package output
import (
"bytes"
"testing"
"github.com/yannh/kubeconform/pkg/resource"
"github.com/yannh/kubeconform/pkg/validator"
)
func TestPrettyTextWrite(t *testing.T) {
for _, testCase := range []struct {
name string
withSummary bool
isStdin bool
verbose bool
results []validator.Result
expect string
}{
{
"a single deployment, no summary, no verbose",
false,
false,
false,
[]validator.Result{},
"",
},
{
"a single deployment, summary, no verbose",
true,
false,
false,
[]validator.Result{
{
Resource: resource.Resource{
Path: "deployment.yml",
Bytes: []byte(`apiVersion: apps/v1
kind: Deployment
metadata:
name: "my-app"
`),
},
Status: validator.Valid,
Err: nil,
},
},
"Summary: 1 resource found in 1 file - Valid: 1, Invalid: 0, Errors: 0, Skipped: 0\n",
},
{
"a single deployment, verbose, with summary",
true,
false,
true,
[]validator.Result{
{
Resource: resource.Resource{
Path: "deployment.yml",
Bytes: []byte(`apiVersion: apps/v1
kind: Deployment
metadata:
name: "my-app"
`),
},
Status: validator.Valid,
Err: nil,
},
},
"\033[32mâś”\033[0m deployment.yml: \033[32mDeployment my-app is valid\033[0m\n" +
"Summary: 1 resource found in 1 file - Valid: 1, Invalid: 0, Errors: 0, Skipped: 0\n",
},
} {
w := new(bytes.Buffer)
o := prettyOutput(w, testCase.withSummary, testCase.isStdin, testCase.verbose)
for _, res := range testCase.results {
o.Write(res)
}
o.Flush()
if w.String() != testCase.expect {
t.Errorf("%s - expected, but got:\n%s\n%s\n", testCase.name, testCase.expect, w)
}
}
}

View file

@ -1,34 +1,122 @@
package registry
import (
"github.com/santhosh-tekuri/jsonschema/v6"
"crypto/tls"
"errors"
"fmt"
"io/ioutil"
"log"
"net/http"
"os"
"time"
"github.com/yannh/kubeconform/pkg/cache"
)
type httpGetter interface {
Get(url string) (resp *http.Response, err error)
}
// SchemaRegistry is a file repository (local or remote) that contains JSON schemas for Kubernetes resources
type SchemaRegistry struct {
c httpGetter
schemaPathTemplate string
cache cache.Cache
strict bool
debug bool
loader jsonschema.URLLoader
}
func newHTTPRegistry(schemaPathTemplate string, loader jsonschema.URLLoader, strict bool, debug bool) (*SchemaRegistry, error) {
func newHTTPRegistry(schemaPathTemplate string, cacheFolder string, strict bool, skipTLS bool, debug bool) (*SchemaRegistry, error) {
reghttp := &http.Transport{
MaxIdleConns: 100,
IdleConnTimeout: 3 * time.Second,
DisableCompression: true,
Proxy: http.ProxyFromEnvironment,
}
if skipTLS {
reghttp.TLSClientConfig = &tls.Config{InsecureSkipVerify: true}
}
var filecache cache.Cache = nil
if cacheFolder != "" {
fi, err := os.Stat(cacheFolder)
if err != nil {
return nil, fmt.Errorf("failed opening cache folder %s: %s", cacheFolder, err)
}
if !fi.IsDir() {
return nil, fmt.Errorf("cache folder %s is not a directory", err)
}
filecache = cache.NewOnDiskCache(cacheFolder)
}
return &SchemaRegistry{
c: &http.Client{Transport: reghttp},
schemaPathTemplate: schemaPathTemplate,
cache: filecache,
strict: strict,
loader: loader,
debug: debug,
}, nil
}
// DownloadSchema downloads the schema for a particular resource from an HTTP server
func (r SchemaRegistry) DownloadSchema(resourceKind, resourceAPIVersion, k8sVersion string) (string, any, error) {
func (r SchemaRegistry) DownloadSchema(resourceKind, resourceAPIVersion, k8sVersion string) (string, []byte, error) {
url, err := schemaPath(r.schemaPathTemplate, resourceKind, resourceAPIVersion, k8sVersion, r.strict)
if err != nil {
return "", nil, err
}
resp, err := r.loader.Load(url)
if r.cache != nil {
if b, err := r.cache.Get(resourceKind, resourceAPIVersion, k8sVersion); err == nil {
return url, b.([]byte), nil
}
}
return url, resp, err
resp, err := r.c.Get(url)
if err != nil {
msg := fmt.Sprintf("failed downloading schema at %s: %s", url, err)
if r.debug {
log.Println(msg)
}
return url, nil, errors.New(msg)
}
defer resp.Body.Close()
if resp.StatusCode == http.StatusNotFound {
msg := fmt.Sprintf("could not find schema at %s", url)
if r.debug {
log.Print(msg)
}
return url, nil, newNotFoundError(errors.New(msg))
}
if resp.StatusCode != http.StatusOK {
msg := fmt.Sprintf("error while downloading schema at %s - received HTTP status %d", url, resp.StatusCode)
if r.debug {
log.Print(msg)
}
return url, nil, fmt.Errorf(msg)
}
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
msg := fmt.Sprintf("failed parsing schema from %s: %s", url, err)
if r.debug {
log.Print(msg)
}
return url, nil, errors.New(msg)
}
if r.debug {
log.Printf("using schema found at %s", url)
}
if r.cache != nil {
if err := r.cache.Set(resourceKind, resourceAPIVersion, k8sVersion, body); err != nil {
return url, nil, fmt.Errorf("failed writing schema to cache: %s", err)
}
}
return url, body, nil
}

117
pkg/registry/http_test.go Normal file
View file

@ -0,0 +1,117 @@
package registry
import (
"bytes"
"fmt"
"io/ioutil"
"net/http"
"strings"
"testing"
)
type mockHTTPGetter struct {
httpGet func(string) (*http.Response, error)
}
func newMockHTTPGetter(f func(string) (*http.Response, error)) *mockHTTPGetter {
return &mockHTTPGetter{
httpGet: f,
}
}
func (m mockHTTPGetter) Get(url string) (resp *http.Response, err error) {
return m.httpGet(url)
}
func TestDownloadSchema(t *testing.T) {
for _, testCase := range []struct {
name string
c httpGetter
schemaPathTemplate string
strict bool
resourceKind, resourceAPIVersion, k8sversion string
expect []byte
expectErr error
}{
{
"error when downloading",
newMockHTTPGetter(func(url string) (resp *http.Response, err error) {
return nil, fmt.Errorf("failed downloading from registry")
}),
"http://kubernetesjson.dev",
true,
"Deployment",
"v1",
"1.18.0",
nil,
fmt.Errorf("failed downloading schema at http://kubernetesjson.dev: failed downloading from registry"),
},
{
"getting 404",
newMockHTTPGetter(func(url string) (resp *http.Response, err error) {
return &http.Response{
StatusCode: http.StatusNotFound,
Body: ioutil.NopCloser(strings.NewReader("http response mock body")),
}, nil
}),
"http://kubernetesjson.dev",
true,
"Deployment",
"v1",
"1.18.0",
nil,
fmt.Errorf("could not find schema at http://kubernetesjson.dev"),
},
{
"getting 503",
newMockHTTPGetter(func(url string) (resp *http.Response, err error) {
return &http.Response{
StatusCode: http.StatusServiceUnavailable,
Body: ioutil.NopCloser(strings.NewReader("http response mock body")),
}, nil
}),
"http://kubernetesjson.dev",
true,
"Deployment",
"v1",
"1.18.0",
nil,
fmt.Errorf("error while downloading schema at http://kubernetesjson.dev - received HTTP status 503"),
},
{
"200",
newMockHTTPGetter(func(url string) (resp *http.Response, err error) {
return &http.Response{
StatusCode: http.StatusOK,
Body: ioutil.NopCloser(strings.NewReader("http response mock body")),
}, nil
}),
"http://kubernetesjson.dev",
true,
"Deployment",
"v1",
"1.18.0",
[]byte("http response mock body"),
nil,
},
} {
reg := SchemaRegistry{
c: testCase.c,
schemaPathTemplate: testCase.schemaPathTemplate,
strict: testCase.strict,
}
_, res, err := reg.DownloadSchema(testCase.resourceKind, testCase.resourceAPIVersion, testCase.k8sversion)
if err == nil || testCase.expectErr == nil {
if err != testCase.expectErr {
t.Errorf("during test '%s': expected error, got:\n%s\n%s\n", testCase.name, testCase.expectErr, err)
}
} else if err.Error() != testCase.expectErr.Error() {
t.Errorf("during test '%s': expected error, got:\n%s\n%s\n", testCase.name, testCase.expectErr, err)
}
if !bytes.Equal(res, testCase.expect) {
t.Errorf("during test '%s': expected %s, got %s", testCase.name, testCase.expect, res)
}
}
}

View file

@ -1,33 +1,63 @@
package registry
import (
"github.com/santhosh-tekuri/jsonschema/v6"
"errors"
"fmt"
"io/ioutil"
"log"
"os"
)
type LocalRegistry struct {
pathTemplate string
strict bool
debug bool
loader jsonschema.URLLoader
}
// NewLocalSchemas creates a new "registry", that will serve schemas from files, given a list of schema filenames
func newLocalRegistry(pathTemplate string, loader jsonschema.URLLoader, strict bool, debug bool) (*LocalRegistry, error) {
func newLocalRegistry(pathTemplate string, strict bool, debug bool) (*LocalRegistry, error) {
return &LocalRegistry{
pathTemplate,
strict,
debug,
loader,
}, nil
}
// DownloadSchema retrieves the schema from a file for the resource
func (r LocalRegistry) DownloadSchema(resourceKind, resourceAPIVersion, k8sVersion string) (string, any, error) {
func (r LocalRegistry) DownloadSchema(resourceKind, resourceAPIVersion, k8sVersion string) (string, []byte, error) {
schemaFile, err := schemaPath(r.pathTemplate, resourceKind, resourceAPIVersion, k8sVersion, r.strict)
if err != nil {
return schemaFile, []byte{}, nil
}
f, err := os.Open(schemaFile)
if err != nil {
if os.IsNotExist(err) {
msg := fmt.Sprintf("could not open file %s", schemaFile)
if r.debug {
log.Print(msg)
}
return schemaFile, nil, newNotFoundError(errors.New(msg))
}
s, err := r.loader.Load(schemaFile)
return schemaFile, s, err
msg := fmt.Sprintf("failed to open schema at %s: %s", schemaFile, err)
if r.debug {
log.Print(msg)
}
return schemaFile, nil, errors.New(msg)
}
defer f.Close()
content, err := ioutil.ReadAll(f)
if err != nil {
msg := fmt.Sprintf("failed to read schema at %s: %s", schemaFile, err)
if r.debug {
log.Print(msg)
}
return schemaFile, nil, err
}
if r.debug {
log.Printf("using schema found at %s", schemaFile)
}
return schemaFile, content, nil
}

View file

@ -3,9 +3,6 @@ package registry
import (
"bytes"
"fmt"
"github.com/yannh/kubeconform/pkg/cache"
"github.com/yannh/kubeconform/pkg/loader"
"os"
"strings"
"text/template"
)
@ -16,9 +13,25 @@ type Manifest struct {
// Registry is an interface that should be implemented by any source of Kubernetes schemas
type Registry interface {
DownloadSchema(resourceKind, resourceAPIVersion, k8sVersion string) (string, any, error)
DownloadSchema(resourceKind, resourceAPIVersion, k8sVersion string) (string, []byte, error)
}
// Retryable indicates whether an error is a temporary or a permanent failure
type Retryable interface {
IsNotFound() bool
}
// NotFoundError is returned when the registry does not contain a schema for the resource
type NotFoundError struct {
err error
}
func newNotFoundError(err error) *NotFoundError {
return &NotFoundError{err}
}
func (e *NotFoundError) Error() string { return e.err.Error() }
func (e *NotFoundError) Retryable() bool { return false }
func schemaPath(tpl, resourceKind, resourceAPIVersion, k8sVersion string, strict bool) (string, error) {
normalisedVersion := k8sVersion
if normalisedVersion != "master" {
@ -68,7 +81,7 @@ func schemaPath(tpl, resourceKind, resourceAPIVersion, k8sVersion string, strict
return buf.String(), nil
}
func New(schemaLocation string, cacheFolder string, strict bool, skipTLS bool, debug bool) (Registry, error) {
func New(schemaLocation string, cache string, strict bool, skipTLS bool, debug bool) (Registry, error) {
if schemaLocation == "default" {
schemaLocation = "https://raw.githubusercontent.com/yannh/kubernetes-json-schema/master/{{ .NormalizedKubernetesVersion }}-standalone{{ .StrictSuffix }}/{{ .ResourceKind }}{{ .KindSuffix }}.json"
} else if !strings.HasSuffix(schemaLocation, "json") { // If we dont specify a full templated path, we assume the paths of our fork of kubernetes-json-schema
@ -80,27 +93,9 @@ func New(schemaLocation string, cacheFolder string, strict bool, skipTLS bool, d
return nil, fmt.Errorf("failed initialising schema location registry: %s", err)
}
var c cache.Cache = nil
if cacheFolder != "" {
fi, err := os.Stat(cacheFolder)
if err != nil {
return nil, fmt.Errorf("failed opening cache folder %s: %s", cacheFolder, err)
}
if !fi.IsDir() {
return nil, fmt.Errorf("cache folder %s is not a directory", err)
}
c = cache.NewOnDiskCache(cacheFolder)
}
if strings.HasPrefix(schemaLocation, "http") {
httpLoader, err := loader.NewHTTPURLLoader(skipTLS, c)
if err != nil {
return nil, fmt.Errorf("failed creating HTTP loader: %s", err)
}
return newHTTPRegistry(schemaLocation, httpLoader, strict, debug)
return newHTTPRegistry(schemaLocation, cache, strict, skipTLS, debug)
}
fileLoader := loader.NewFileLoader()
return newLocalRegistry(schemaLocation, fileLoader, strict, debug)
return newLocalRegistry(schemaLocation, strict, debug)
}

View file

@ -5,18 +5,14 @@ import (
"context"
"errors"
"fmt"
jsonschema "github.com/santhosh-tekuri/jsonschema/v6"
"io"
jsonschema "github.com/santhosh-tekuri/jsonschema/v5"
_ "github.com/santhosh-tekuri/jsonschema/v5/httploader"
"github.com/yannh/kubeconform/pkg/cache"
"github.com/yannh/kubeconform/pkg/loader"
"github.com/yannh/kubeconform/pkg/registry"
"github.com/yannh/kubeconform/pkg/resource"
"golang.org/x/text/language"
"golang.org/x/text/message"
"io"
"os"
"sigs.k8s.io/yaml"
"strings"
"time"
)
// Different types of validation results
@ -63,7 +59,7 @@ type Opts struct {
SkipKinds map[string]struct{} // List of resource Kinds to ignore
RejectKinds map[string]struct{} // List of resource Kinds to reject
KubernetesVersion string // Kubernetes Version - has to match one in https://github.com/instrumenta/kubernetes-json-schema
Strict bool // Throws an error if resources contain undocumented fields
Strict bool // thros an error if resources contain undocumented fields
IgnoreMissingSchemas bool // skip a resource if no schema for that resource can be found
}
@ -95,48 +91,19 @@ func New(schemaLocations []string, opts Opts) (Validator, error) {
opts.RejectKinds = map[string]struct{}{}
}
var filecache cache.Cache = nil
if opts.Cache != "" {
fi, err := os.Stat(opts.Cache)
if err != nil {
return nil, fmt.Errorf("failed opening cache folder %s: %s", opts.Cache, err)
}
if !fi.IsDir() {
return nil, fmt.Errorf("cache folder %s is not a directory", err)
}
filecache = cache.NewOnDiskCache(opts.Cache)
}
httpLoader, err := loader.NewHTTPURLLoader(false, filecache)
if err != nil {
return nil, fmt.Errorf("failed creating HTTP loader: %s", err)
}
return &v{
opts: opts,
schemaDownload: downloadSchema,
schemaMemoryCache: cache.NewInMemoryCache(),
regs: registries,
loader: jsonschema.SchemeURLLoader{
"file": jsonschema.FileLoader{},
"http": httpLoader,
"https": httpLoader,
},
opts: opts,
schemaDownload: downloadSchema,
schemaCache: cache.NewInMemoryCache(),
regs: registries,
}, nil
}
type v struct {
opts Opts
schemaDiskCache cache.Cache
schemaMemoryCache cache.Cache
schemaDownload func(registries []registry.Registry, loader jsonschema.SchemeURLLoader, kind, version, k8sVersion string) (*jsonschema.Schema, error)
regs []registry.Registry
loader jsonschema.SchemeURLLoader
}
func key(resourceKind, resourceAPIVersion, k8sVersion string) string {
return fmt.Sprintf("%s-%s-%s", resourceKind, resourceAPIVersion, k8sVersion)
opts Opts
schemaCache cache.Cache
schemaDownload func(registries []registry.Registry, kind, version, k8sVersion string) (*jsonschema.Schema, error)
regs []registry.Registry
}
// ValidateResource validates a single resource. This allows to validate
@ -197,8 +164,8 @@ func (val *v) ValidateResource(res resource.Resource) Result {
cached := false
var schema *jsonschema.Schema
if val.schemaMemoryCache != nil {
s, err := val.schemaMemoryCache.Get(key(sig.Kind, sig.Version, val.opts.KubernetesVersion))
if val.schemaCache != nil {
s, err := val.schemaCache.Get(sig.Kind, sig.Version, val.opts.KubernetesVersion)
if err == nil {
cached = true
schema = s.(*jsonschema.Schema)
@ -206,12 +173,12 @@ func (val *v) ValidateResource(res resource.Resource) Result {
}
if !cached {
if schema, err = val.schemaDownload(val.regs, val.loader, sig.Kind, sig.Version, val.opts.KubernetesVersion); err != nil {
if schema, err = val.schemaDownload(val.regs, sig.Kind, sig.Version, val.opts.KubernetesVersion); err != nil {
return Result{Resource: res, Err: err, Status: Error}
}
if val.schemaMemoryCache != nil {
val.schemaMemoryCache.Set(key(sig.Kind, sig.Version, val.opts.KubernetesVersion), schema)
if val.schemaCache != nil {
val.schemaCache.Set(sig.Kind, sig.Version, val.opts.KubernetesVersion, schema)
}
}
@ -229,22 +196,17 @@ func (val *v) ValidateResource(res resource.Resource) Result {
var e *jsonschema.ValidationError
if errors.As(err, &e) {
for _, ve := range e.Causes {
path := ""
for _, f := range ve.InstanceLocation {
path = path + "/" + f
}
validationErrors = append(validationErrors, ValidationError{
Path: path,
Msg: ve.ErrorKind.LocalizedString(message.NewPrinter(language.English)),
Path: ve.InstanceLocation,
Msg: ve.Message,
})
}
}
return Result{
Resource: res,
Status: Invalid,
Err: fmt.Errorf("problem validating schema. Check JSON formatting: %s", strings.ReplaceAll(err.Error(), "\n", " ")),
Err: fmt.Errorf("problem validating schema. Check JSON formatting: %s", err),
ValidationErrors: validationErrors,
}
}
@ -260,9 +222,8 @@ func (val *v) ValidateWithContext(ctx context.Context, filename string, r io.Rea
for {
select {
case res, ok := <-resourcesChan:
if ok {
validationResults = append(validationResults, val.ValidateResource(res))
} else {
validationResults = append(validationResults, val.ValidateResource(res))
if !ok {
resourcesChan = nil
}
@ -285,112 +246,24 @@ func (val *v) Validate(filename string, r io.ReadCloser) []Result {
return val.ValidateWithContext(context.Background(), filename, r)
}
// validateDuration is a custom validator for the duration format
// as JSONSchema only supports the ISO 8601 format, i.e. `PT1H30M`,
// while Kubernetes API machinery expects the Go duration format, i.e. `1h30m`
// which is commonly used in Kubernetes operators for specifying intervals.
// https://github.com/kubernetes/apiextensions-apiserver/blob/1ecd29f74da0639e2e6e3b8fac0c9bfd217e05eb/pkg/apis/apiextensions/v1/types_jsonschema.go#L71
func validateDuration(v any) error {
// Try validation with the Go duration format
if _, err := time.ParseDuration(v.(string)); err == nil {
return nil
}
s, ok := v.(string)
if !ok {
return nil
}
// must start with 'P'
s, ok = strings.CutPrefix(s, "P")
if !ok {
return fmt.Errorf("must start with P")
}
if s == "" {
return fmt.Errorf("nothing after P")
}
// dur-week
if s, ok := strings.CutSuffix(s, "W"); ok {
if s == "" {
return fmt.Errorf("no number in week")
}
for _, ch := range s {
if ch < '0' || ch > '9' {
return fmt.Errorf("invalid week")
}
}
return nil
}
allUnits := []string{"YMD", "HMS"}
for i, s := range strings.Split(s, "T") {
if i != 0 && s == "" {
return fmt.Errorf("no time elements")
}
if i >= len(allUnits) {
return fmt.Errorf("more than one T")
}
units := allUnits[i]
for s != "" {
digitCount := 0
for _, ch := range s {
if ch >= '0' && ch <= '9' {
digitCount++
} else {
break
}
}
if digitCount == 0 {
return fmt.Errorf("missing number")
}
s = s[digitCount:]
if s == "" {
return fmt.Errorf("missing unit")
}
unit := s[0]
j := strings.IndexByte(units, unit)
if j == -1 {
if strings.IndexByte(allUnits[i], unit) != -1 {
return fmt.Errorf("unit %q out of order", unit)
}
return fmt.Errorf("invalid unit %q", unit)
}
units = units[j+1:]
s = s[1:]
}
}
return nil
}
func downloadSchema(registries []registry.Registry, l jsonschema.SchemeURLLoader, kind, version, k8sVersion string) (*jsonschema.Schema, error) {
func downloadSchema(registries []registry.Registry, kind, version, k8sVersion string) (*jsonschema.Schema, error) {
var err error
var schemaBytes []byte
var path string
var s any
for _, reg := range registries {
path, s, err = reg.DownloadSchema(kind, version, k8sVersion)
path, schemaBytes, err = reg.DownloadSchema(kind, version, k8sVersion)
if err == nil {
c := jsonschema.NewCompiler()
c.RegisterFormat(&jsonschema.Format{"duration", validateDuration})
c.UseLoader(l)
c.DefaultDraft(jsonschema.Draft4)
if err := c.AddResource(path, s); err != nil {
continue
}
schema, err := c.Compile(path)
schema, err := jsonschema.CompileString(path, string(schemaBytes))
// If we got a non-parseable response, we try the next registry
if err != nil {
continue
}
return schema, nil
return schema, err
}
if _, notfound := err.(*loader.NotFoundError); notfound {
continue
}
if _, nonJSONError := err.(*loader.NonJSONResponseError); nonJSONError {
// If we get a 404, we try the next registry, but we exit if we get a real failure
if _, notfound := err.(*registry.NotFoundError); notfound {
continue
}

View file

@ -1,10 +1,6 @@
package validator
import (
"bytes"
"github.com/santhosh-tekuri/jsonschema/v6"
"github.com/yannh/kubeconform/pkg/loader"
"io"
"reflect"
"testing"
@ -14,16 +10,16 @@ import (
)
type mockRegistry struct {
SchemaDownloader func() (string, any, error)
SchemaDownloader func() (string, []byte, error)
}
func newMockRegistry(f func() (string, any, error)) *mockRegistry {
func newMockRegistry(f func() (string, []byte, error)) *mockRegistry {
return &mockRegistry{
SchemaDownloader: f,
}
}
func (m mockRegistry) DownloadSchema(resourceKind, resourceAPIVersion, k8sVersion string) (string, any, error) {
func (m mockRegistry) DownloadSchema(resourceKind, resourceAPIVersion, k8sVersion string) (string, []byte, error) {
return m.SchemaDownloader()
}
@ -34,8 +30,7 @@ func TestValidate(t *testing.T) {
schemaRegistry2 []byte
ignoreMissingSchema bool
strict bool
expectStatus Status
expectErrors []ValidationError
expect Status
}{
{
"valid resource",
@ -70,7 +65,6 @@ lastName: bar
false,
false,
Valid,
[]ValidationError{},
},
{
"invalid resource",
@ -105,12 +99,6 @@ lastName: bar
false,
false,
Invalid,
[]ValidationError{
{
Path: "/firstName",
Msg: "got string, want number",
},
},
},
{
"missing required field",
@ -144,12 +132,6 @@ firstName: foo
false,
false,
Invalid,
[]ValidationError{
{
Path: "",
Msg: "missing property 'lastName'",
},
},
},
{
"key \"firstName\" already set in map",
@ -176,7 +158,6 @@ firstName: bar
false,
true,
Error,
[]ValidationError{},
},
{
"key firstname already set in map in non-strict mode",
@ -203,7 +184,6 @@ firstName: bar
false,
false,
Valid,
[]ValidationError{},
},
{
"resource has invalid yaml",
@ -241,7 +221,6 @@ lastName: bar
false,
false,
Error,
[]ValidationError{},
},
{
"missing schema in 1st registry",
@ -279,7 +258,6 @@ lastName: bar
false,
false,
Valid,
[]ValidationError{},
},
{
"non-json response in 1st registry",
@ -317,7 +295,6 @@ lastName: bar
false,
false,
Valid,
[]ValidationError{},
},
{
"missing schema in both registries, ignore missing",
@ -332,7 +309,6 @@ lastName: bar
true,
false,
Skipped,
[]ValidationError{},
},
{
"missing schema in both registries, do not ignore missing",
@ -347,7 +323,6 @@ lastName: bar
false,
false,
Error,
[]ValidationError{},
},
{
"non-json response in both registries, ignore missing",
@ -362,7 +337,6 @@ lastName: bar
true,
false,
Skipped,
[]ValidationError{},
},
{
"non-json response in both registries, do not ignore missing",
@ -377,88 +351,6 @@ lastName: bar
false,
false,
Error,
[]ValidationError{},
},
{
"valid resource duration - go format",
[]byte(`
kind: name
apiVersion: v1
interval: 5s
`),
[]byte(`{
"title": "Example Schema",
"type": "object",
"properties": {
"kind": {
"type": "string"
},
"interval": {
"type": "string",
"format": "duration"
}
},
"required": ["interval"]
}`),
nil,
false,
false,
Valid,
[]ValidationError{},
},
{
"valid resource duration - iso8601 format",
[]byte(`
kind: name
apiVersion: v1
interval: PT1H
`),
[]byte(`{
"title": "Example Schema",
"type": "object",
"properties": {
"kind": {
"type": "string"
},
"interval": {
"type": "string",
"format": "duration"
}
},
"required": ["interval"]
}`),
nil,
false,
false,
Valid,
[]ValidationError{},
},
{
"invalid resource duration",
[]byte(`
kind: name
apiVersion: v1
interval: test
`),
[]byte(`{
"title": "Example Schema",
"type": "object",
"properties": {
"kind": {
"type": "string"
},
"interval": {
"type": "string",
"format": "duration"
}
},
"required": ["interval"]
}`),
nil,
false,
false,
Invalid,
[]ValidationError{{Path: "/interval", Msg: "'test' is not valid duration: must start with P"}},
},
} {
val := v{
@ -468,45 +360,22 @@ interval: test
IgnoreMissingSchemas: testCase.ignoreMissingSchema,
Strict: testCase.strict,
},
schemaCache: nil,
schemaDownload: downloadSchema,
regs: []registry.Registry{
newMockRegistry(func() (string, any, error) {
if testCase.schemaRegistry1 == nil {
return "", nil, loader.NewNotFoundError(nil)
}
s, err := jsonschema.UnmarshalJSON(bytes.NewReader(testCase.schemaRegistry1))
if err != nil {
return "", s, loader.NewNonJSONResponseError(err)
}
return "", s, err
newMockRegistry(func() (string, []byte, error) {
return "", testCase.schemaRegistry1, nil
}),
newMockRegistry(func() (string, any, error) {
if testCase.schemaRegistry2 == nil {
return "", nil, loader.NewNotFoundError(nil)
}
s, err := jsonschema.UnmarshalJSON(bytes.NewReader(testCase.schemaRegistry2))
if err != nil {
return "", s, loader.NewNonJSONResponseError(err)
}
return "", s, err
newMockRegistry(func() (string, []byte, error) {
return "", testCase.schemaRegistry2, nil
}),
},
}
got := val.ValidateResource(resource.Resource{Bytes: testCase.rawResource})
if got.Status != testCase.expectStatus {
if got := val.ValidateResource(resource.Resource{Bytes: testCase.rawResource}); got.Status != testCase.expect {
if got.Err != nil {
t.Errorf("Test '%s' - expected %d, got %d: %s", testCase.name, testCase.expectStatus, got.Status, got.Err.Error())
t.Errorf("Test '%s' - expected %d, got %d: %s", testCase.name, testCase.expect, got.Status, got.Err.Error())
} else {
t.Errorf("Test '%s'- %d - expected %d, got %d", testCase.name, i, testCase.expectStatus, got.Status)
}
}
if len(got.ValidationErrors) != len(testCase.expectErrors) {
t.Errorf("Test '%s': expected ValidationErrors: %+v, got: % v", testCase.name, testCase.expectErrors, got.ValidationErrors)
}
for i, _ := range testCase.expectErrors {
if testCase.expectErrors[i] != got.ValidationErrors[i] {
t.Errorf("Test '%s': expected ValidationErrors: %+v, got: % v", testCase.name, testCase.expectErrors, got.ValidationErrors)
t.Errorf("%d - expected %d, got %d", i, testCase.expect, got.Status)
}
}
}
@ -543,8 +412,8 @@ age: not a number
}`)
expectedErrors := []ValidationError{
{Path: "", Msg: "missing property 'lastName'"},
{Path: "/age", Msg: "got string, want integer"},
{Path: "", Msg: "missing properties: 'lastName'"},
{Path: "/age", Msg: "expected integer, but got string"},
}
val := v{
@ -552,14 +421,11 @@ age: not a number
SkipKinds: map[string]struct{}{},
RejectKinds: map[string]struct{}{},
},
schemaCache: nil,
schemaDownload: downloadSchema,
regs: []registry.Registry{
newMockRegistry(func() (string, any, error) {
s, err := jsonschema.UnmarshalJSON(bytes.NewReader(schema))
if err != nil {
return "", s, loader.NewNonJSONResponseError(err)
}
return "", s, err
newMockRegistry(func() (string, []byte, error) {
return "", schema, nil
}),
},
}
@ -569,65 +435,3 @@ age: not a number
t.Errorf("Expected %+v, got %+v", expectedErrors, got.ValidationErrors)
}
}
func TestValidateFile(t *testing.T) {
inputData := []byte(`
kind: name
apiVersion: v1
firstName: bar
lastName: qux
---
kind: name
apiVersion: v1
firstName: foo
`)
schema := []byte(`{
"title": "Example Schema",
"type": "object",
"properties": {
"kind": {
"type": "string"
},
"firstName": {
"type": "string"
},
"lastName": {
"type": "string"
}
},
"required": ["firstName", "lastName"]
}`)
val := v{
opts: Opts{
SkipKinds: map[string]struct{}{},
RejectKinds: map[string]struct{}{},
},
schemaDownload: downloadSchema,
regs: []registry.Registry{
newMockRegistry(func() (string, any, error) {
s, err := jsonschema.UnmarshalJSON(bytes.NewReader(schema))
return "", s, err
}),
},
}
gotStatuses := []Status{}
gotValidationErrors := []ValidationError{}
for _, got := range val.Validate("test-file", io.NopCloser(bytes.NewReader(inputData))) {
gotStatuses = append(gotStatuses, got.Status)
gotValidationErrors = append(gotValidationErrors, got.ValidationErrors...)
}
expectedStatuses := []Status{Valid, Invalid}
expectedValidationErrors := []ValidationError{
{Path: "", Msg: "missing property 'lastName'"},
}
if !reflect.DeepEqual(expectedStatuses, gotStatuses) {
t.Errorf("Expected %+v, got %+v", expectedStatuses, gotStatuses)
}
if !reflect.DeepEqual(expectedValidationErrors, gotValidationErrors) {
t.Errorf("Expected %+v, got %+v", expectedValidationErrors, gotValidationErrors)
}
}

View file

@ -72,10 +72,3 @@ setup() {
run diff prometheus_v1.json ./fixtures/prometheus_v1-denyRootAdditionalProperties.json
[ "$status" -eq 0 ]
}
@test "Should output an error if no file is passed" {
run ./openapi2jsonschema.py
[ "$status" -eq 1 ]
[ "${lines[0]}" == 'Missing FILE parameter.' ]
[ "${lines[1]}" == 'Usage: ./openapi2jsonschema.py [FILE]' ]
}

View file

@ -122,8 +122,8 @@ def construct_value(load, node):
if __name__ == "__main__":
if len(sys.argv) < 2:
print('Missing FILE parameter.\nUsage: %s [FILE]' % sys.argv[0])
if len(sys.argv) == 0:
print("missing file")
exit(1)
for crdFile in sys.argv[1:]:
@ -155,7 +155,6 @@ if __name__ == "__main__":
filename = filename_format.format(
kind=y["spec"]["names"]["kind"],
group=y["spec"]["group"].split(".")[0],
fullgroup=y["spec"]["group"],
version=version["name"],
).lower() + ".json"
@ -165,7 +164,6 @@ if __name__ == "__main__":
filename = filename_format.format(
kind=y["spec"]["names"]["kind"],
group=y["spec"]["group"].split(".")[0],
fullgroup=y["spec"]["group"],
version=version["name"],
).lower() + ".json"
@ -175,7 +173,6 @@ if __name__ == "__main__":
filename = filename_format.format(
kind=y["spec"]["names"]["kind"],
group=y["spec"]["group"].split(".")[0],
fullgroup=y["spec"]["group"],
version=y["spec"]["version"],
).lower() + ".json"

View file

@ -1,13 +1,13 @@
---
title: "GitHub Action"
title: "Github Action"
date: 2021-07-02T00:00:00Z
draft: false
tags: ["Kubeconform", "Usage"]
weight: 6
---
Kubeconform is publishes Docker Images to GitHub's new Container Registry, ghcr.io. These images
can be used directly in a GitHub Action, once logged in using a [_GitHub Token_](https://github.blog/changelog/2021-03-24-packages-container-registry-now-supports-github_token/).
Kubeconform is publishes Docker Images to Github's new Container Registry, ghcr.io. These images
can be used directly in a Github Action, once logged in using a [_Github Token_](https://github.blog/changelog/2021-03-24-packages-container-registry-now-supports-github_token/).
{{< prism >}}name: kubeconform
on: push
@ -15,7 +15,7 @@ jobs:
kubeconform:
runs-on: ubuntu-latest
steps:
- name: login to GitHub Packages
- name: login to Github Packages
run: echo "${{ github.token }}" | docker login https://ghcr.io -u ${GITHUB_ACTOR} --password-stdin
- uses: actions/checkout@v2
- uses: docker://ghcr.io/yannh/kubeconform:master
@ -24,8 +24,8 @@ jobs:
args: "-summary -output json kubeconfigs/"
{{< /prism >}}
_Note on pricing_: Kubeconform relies on GitHub Container Registry which is currently in Beta. During that period,
_Note on pricing_: Kubeconform relies on Github Container Registry which is currently in Beta. During that period,
[bandwidth is free](https://docs.github.com/en/packages/guides/about-github-container-registry). After that period,
bandwidth costs might be applicable. Since bandwidth from GitHub Packages within GitHub Actions is free, I expect
GitHub Container Registry to also be usable for free within GitHub Actions in the future. If that were not to be the
bandwidth costs might be applicable. Since bandwidth from Github Packages within Github Actions is free, I expect
Github Container Registry to also be usable for free within Github Actions in the future. If that were not to be the
case, I might publish the Docker image to a different platform.

View file

@ -8,7 +8,7 @@
<div id=main-container><div id=header>
<ul id=navigation>
<li><a href=/about>About</a></li>
<li><a href=https://github.com/yannh/kubeconform/>GitHub</a></li>
<li><a href=https://github.com/yannh/kubeconform/>Github</a></li>
<li><a href=/docs/installation/>Docs</a></li>
<li><a href=/>Home</a></li>
</ul>

View file

@ -8,7 +8,7 @@
<div id=main-container><div id=header>
<ul id=navigation>
<li><a href=/about>About</a></li>
<li><a href=https://github.com/yannh/kubeconform/>GitHub</a></li>
<li><a href=https://github.com/yannh/kubeconform/>Github</a></li>
<li><a href=/docs/installation/>Docs</a></li>
<li><a href=/>Home</a></li>
</ul>
@ -21,7 +21,7 @@
<li><a href=http://kubeconform.mandragor.org/docs/usage/>Usage</a></li>
<li><a href=http://kubeconform.mandragor.org/docs/crd-support/>Custom Resources support</a></li>
<li><a href=http://kubeconform.mandragor.org/docs/json-schema-conversion/>OpenAPI to JSON Schema conversion</a></li>
<li><a href=http://kubeconform.mandragor.org/docs/usage-as-github-action/>GitHub Action</a></li>
<li><a href=http://kubeconform.mandragor.org/docs/usage-as-github-action/>Github Action</a></li>
<li><a href=http://kubeconform.mandragor.org/docs/using-as-a-go-module/>Kubeconform as a Go module</a></li>
</ul>
<div id=main>

View file

@ -5,7 +5,7 @@ For example, for Linux on x86_64 architecture:
curl -L https://github.com/yannh/kubeconform/releases/latest/download/kubeconform-linux-amd64.tar.gz | tar xvzf - &amp;&amp; \ sudo mv kubeconform /usr/local/bin/ MacOs Kubeconform is available to install using Homebrew: $ brew install kubeconform
Windows Download the latest release from our release page.</description></item><item><title>Usage</title><link>http://kubeconform.mandragor.org/docs/usage/</link><pubDate>Fri, 02 Jul 2021 00:00:00 +0000</pubDate><guid>http://kubeconform.mandragor.org/docs/usage/</guid><description>$ ./bin/kubeconform -h Usage: ./bin/kubeconform [OPTION]... [FILE OR FOLDER]... -cache string cache schemas downloaded via HTTP to this folder -cpu-prof string debug - log CPU profiling to file -exit-on-error immediately stop execution when the first error is encountered -h show help information -ignore-filename-pattern value regular expression specifying paths to ignore (can be specified multiple times) -ignore-missing-schemas skip files with missing schemas instead of failing -insecure-skip-tls-verify disable verification of the server's SSL certificate.</description></item><item><title>Custom Resources support</title><link>http://kubeconform.mandragor.org/docs/crd-support/</link><pubDate>Fri, 02 Jul 2021 00:00:00 +0000</pubDate><guid>http://kubeconform.mandragor.org/docs/crd-support/</guid><description>When the -schema-location parameter is not used, or set to &amp;ldquo;default&amp;rdquo;, kubeconform will default to downloading schemas from https://github.com/yannh/kubernetes-json-schema. Kubeconform however supports passing one, or multiple, schemas locations - HTTP(s) URLs, or local filesystem paths, in which case it will lookup for schema definitions in each of them, in order, stopping as soon as a matching file is found.
If the -schema-location value does not end with &amp;lsquo;.json&amp;rsquo;, Kubeconform will assume filenames / a file structure identical to that of kubernetesjsonschema.</description></item><item><title>OpenAPI to JSON Schema conversion</title><link>http://kubeconform.mandragor.org/docs/json-schema-conversion/</link><pubDate>Fri, 02 Jul 2021 00:00:00 +0000</pubDate><guid>http://kubeconform.mandragor.org/docs/json-schema-conversion/</guid><description>Kubeconform uses JSON schemas to validate Kubernetes resources. For custom resources, the CustomResourceDefinition first needs to be converted to JSON Schema. A script is provided to convert these CustomResourceDefinitions to JSON schema. Here is an example how to use it:
#!/bin/bash $ ./scripts/openapi2jsonschema.py https://raw.githubusercontent.com/aws/amazon-sagemaker-operator-for-k8s/master/config/crd/bases/sagemaker.aws.amazon.com_trainingjobs.yaml JSON schema written to trainingjob_v1.json The FILENAME_FORMAT environment variable can be used to change the output file name (Available variables: kind, group, version) (Default: {kind}_{version}).</description></item><item><title>GitHub Action</title><link>http://kubeconform.mandragor.org/docs/usage-as-github-action/</link><pubDate>Fri, 02 Jul 2021 00:00:00 +0000</pubDate><guid>http://kubeconform.mandragor.org/docs/usage-as-github-action/</guid><description>Kubeconform is publishes Docker Images to GitHub&amp;rsquo;s new Container Registry, ghcr.io. These images can be used directly in a GitHub Action, once logged in using a GitHub Token.
name: kubeconform on: push jobs: kubeconform: runs-on: ubuntu-latest steps: - name: login to GitHub Packages run: echo "${{ github.token }}" | docker login https://ghcr.io -u ${GITHUB_ACTOR} --password-stdin - uses: actions/checkout@v2 - uses: docker://ghcr.io/yannh/kubeconform:master with: entrypoint: '/kubeconform' args: "-summary -output json kubeconfigs/" Note on pricing: Kubeconform relies on GitHub Container Registry which is currently in Beta.</description></item><item><title>Kubeconform as a Go module</title><link>http://kubeconform.mandragor.org/docs/using-as-a-go-module/</link><pubDate>Fri, 02 Jul 2021 00:00:00 +0000</pubDate><guid>http://kubeconform.mandragor.org/docs/using-as-a-go-module/</guid><description>Warning: This is a work-in-progress, the interface is not yet considered stable. Feedback is encouraged.
#!/bin/bash $ ./scripts/openapi2jsonschema.py https://raw.githubusercontent.com/aws/amazon-sagemaker-operator-for-k8s/master/config/crd/bases/sagemaker.aws.amazon.com_trainingjobs.yaml JSON schema written to trainingjob_v1.json The FILENAME_FORMAT environment variable can be used to change the output file name (Available variables: kind, group, version) (Default: {kind}_{version}).</description></item><item><title>Github Action</title><link>http://kubeconform.mandragor.org/docs/usage-as-github-action/</link><pubDate>Fri, 02 Jul 2021 00:00:00 +0000</pubDate><guid>http://kubeconform.mandragor.org/docs/usage-as-github-action/</guid><description>Kubeconform is publishes Docker Images to Github&amp;rsquo;s new Container Registry, ghcr.io. These images can be used directly in a Github Action, once logged in using a Github Token.
name: kubeconform on: push jobs: kubeconform: runs-on: ubuntu-latest steps: - name: login to Github Packages run: echo "${{ github.token }}" | docker login https://ghcr.io -u ${GITHUB_ACTOR} --password-stdin - uses: actions/checkout@v2 - uses: docker://ghcr.io/yannh/kubeconform:master with: entrypoint: '/kubeconform' args: "-summary -output json kubeconfigs/" Note on pricing: Kubeconform relies on Github Container Registry which is currently in Beta.</description></item><item><title>Kubeconform as a Go module</title><link>http://kubeconform.mandragor.org/docs/using-as-a-go-module/</link><pubDate>Fri, 02 Jul 2021 00:00:00 +0000</pubDate><guid>http://kubeconform.mandragor.org/docs/using-as-a-go-module/</guid><description>Warning: This is a work-in-progress, the interface is not yet considered stable. Feedback is encouraged.
Kubeconform contains a package that can be used as a library. An example of usage can be found in examples/main.go
Additional documentation on pkg.go.dev</description></item></channel></rss>

View file

@ -8,7 +8,7 @@
<div id=main-container><div id=header>
<ul id=navigation>
<li><a href=/about>About</a></li>
<li><a href=https://github.com/yannh/kubeconform/>GitHub</a></li>
<li><a href=https://github.com/yannh/kubeconform/>Github</a></li>
<li><a href=/docs/installation/>Docs</a></li>
<li><a href=/>Home</a></li>
</ul>
@ -21,7 +21,7 @@
<li><a href=http://kubeconform.mandragor.org/docs/usage/>Usage</a></li>
<li><a href=http://kubeconform.mandragor.org/docs/crd-support/>Custom Resources support</a></li>
<li><a href=http://kubeconform.mandragor.org/docs/json-schema-conversion/>OpenAPI to JSON Schema conversion</a></li>
<li><a href=http://kubeconform.mandragor.org/docs/usage-as-github-action/>GitHub Action</a></li>
<li><a href=http://kubeconform.mandragor.org/docs/usage-as-github-action/>Github Action</a></li>
<li><a href=http://kubeconform.mandragor.org/docs/using-as-a-go-module/>Kubeconform as a Go module</a></li>
</ul>
<div id=main>

View file

@ -8,7 +8,7 @@
<div id=main-container><div id=header>
<ul id=navigation>
<li><a href=/about>About</a></li>
<li><a href=https://github.com/yannh/kubeconform/>GitHub</a></li>
<li><a href=https://github.com/yannh/kubeconform/>Github</a></li>
<li><a href=/docs/installation/>Docs</a></li>
<li><a href=/>Home</a></li>
</ul>
@ -21,13 +21,13 @@
<li><a href=http://kubeconform.mandragor.org/docs/usage/>Usage</a></li>
<li><a href=http://kubeconform.mandragor.org/docs/crd-support/>Custom Resources support</a></li>
<li><a href=http://kubeconform.mandragor.org/docs/json-schema-conversion/>OpenAPI to JSON Schema conversion</a></li>
<li><a href=http://kubeconform.mandragor.org/docs/usage-as-github-action/>GitHub Action</a></li>
<li><a href=http://kubeconform.mandragor.org/docs/usage-as-github-action/>Github Action</a></li>
<li><a href=http://kubeconform.mandragor.org/docs/using-as-a-go-module/>Kubeconform as a Go module</a></li>
</ul>
<div id=main>
<div class=navig>
<a href=http://kubeconform.mandragor.org/docs/crd-support/ id=prev>&lt; Custom Resources support</a>
<a href=http://kubeconform.mandragor.org/docs/usage-as-github-action/ id=next>GitHub Action ></a>
<a href=http://kubeconform.mandragor.org/docs/usage-as-github-action/ id=next>Github Action ></a>
</div>
<div id=content-text>
<h1>OpenAPI to JSON Schema conversion</h1>
@ -47,7 +47,7 @@ JSON schema written to trainingjob-sagemaker-v1.json
</div>
<div class=navig>
<a href=http://kubeconform.mandragor.org/docs/crd-support/ id=prev>&lt; Custom Resources support</a>
<a href=http://kubeconform.mandragor.org/docs/usage-as-github-action/ id=next>GitHub Action ></a>
<a href=http://kubeconform.mandragor.org/docs/usage-as-github-action/ id=next>Github Action ></a>
</div>
<script defer src=/js/prism.js></script>
</div>

View file

@ -2,13 +2,13 @@
<meta charset=utf-8>
<meta name=author content="Yann Hamon">
<link rel=stylesheet type=text/css href=/css/style.css><link rel=stylesheet type=text/css href=/css/prism.css>
<title>Kubeconform - Fast Kubernetes manifests validation! | GitHub Action</title>
<title>Kubeconform - Fast Kubernetes manifests validation! | Github Action</title>
</head>
<body>
<div id=main-container><div id=header>
<ul id=navigation>
<li><a href=/about>About</a></li>
<li><a href=https://github.com/yannh/kubeconform/>GitHub</a></li>
<li><a href=https://github.com/yannh/kubeconform/>Github</a></li>
<li><a href=/docs/installation/>Docs</a></li>
<li><a href=/>Home</a></li>
</ul>
@ -21,7 +21,7 @@
<li><a href=http://kubeconform.mandragor.org/docs/usage/>Usage</a></li>
<li><a href=http://kubeconform.mandragor.org/docs/crd-support/>Custom Resources support</a></li>
<li><a href=http://kubeconform.mandragor.org/docs/json-schema-conversion/>OpenAPI to JSON Schema conversion</a></li>
<li><a href=http://kubeconform.mandragor.org/docs/usage-as-github-action/>GitHub Action</a></li>
<li><a href=http://kubeconform.mandragor.org/docs/usage-as-github-action/>Github Action</a></li>
<li><a href=http://kubeconform.mandragor.org/docs/using-as-a-go-module/>Kubeconform as a Go module</a></li>
</ul>
<div id=main>
@ -30,16 +30,16 @@
<a href=http://kubeconform.mandragor.org/docs/using-as-a-go-module/ id=next>Kubeconform as a Go module ></a>
</div>
<div id=content-text>
<h1>GitHub Action</h1>
<p>Kubeconform is publishes Docker Images to GitHub&rsquo;s new Container Registry, ghcr.io. These images
can be used directly in a GitHub Action, once logged in using a <a href=https://github.blog/changelog/2021-03-24-packages-container-registry-now-supports-github_token/><em>GitHub Token</em></a>.</p>
<h1>Github Action</h1>
<p>Kubeconform is publishes Docker Images to Github&rsquo;s new Container Registry, ghcr.io. These images
can be used directly in a Github Action, once logged in using a <a href=https://github.blog/changelog/2021-03-24-packages-container-registry-now-supports-github_token/><em>Github Token</em></a>.</p>
<pre><code class=language-bash>name: kubeconform
on: push
jobs:
kubeconform:
runs-on: ubuntu-latest
steps:
- name: login to GitHub Packages
- name: login to Github Packages
run: echo "${{ github.token }}" | docker login https://ghcr.io -u ${GITHUB_ACTOR} --password-stdin
- uses: actions/checkout@v2
- uses: docker://ghcr.io/yannh/kubeconform:master
@ -47,10 +47,10 @@ jobs:
entrypoint: '/kubeconform'
args: "-summary -output json kubeconfigs/"
</code></pre>
<p><em>Note on pricing</em>: Kubeconform relies on GitHub Container Registry which is currently in Beta. During that period,
<p><em>Note on pricing</em>: Kubeconform relies on Github Container Registry which is currently in Beta. During that period,
<a href=https://docs.github.com/en/packages/guides/about-github-container-registry>bandwidth is free</a>. After that period,
bandwidth costs might be applicable. Since bandwidth from GitHub Packages within GitHub Actions is free, I expect
GitHub Container Registry to also be usable for free within GitHub Actions in the future. If that were not to be the
bandwidth costs might be applicable. Since bandwidth from Github Packages within Github Actions is free, I expect
Github Container Registry to also be usable for free within Github Actions in the future. If that were not to be the
case, I might publish the Docker image to a different platform.</p>
</div>
<div class=navig>

View file

@ -8,7 +8,7 @@
<div id=main-container><div id=header>
<ul id=navigation>
<li><a href=/about>About</a></li>
<li><a href=https://github.com/yannh/kubeconform/>GitHub</a></li>
<li><a href=https://github.com/yannh/kubeconform/>Github</a></li>
<li><a href=/docs/installation/>Docs</a></li>
<li><a href=/>Home</a></li>
</ul>
@ -21,7 +21,7 @@
<li><a href=http://kubeconform.mandragor.org/docs/usage/>Usage</a></li>
<li><a href=http://kubeconform.mandragor.org/docs/crd-support/>Custom Resources support</a></li>
<li><a href=http://kubeconform.mandragor.org/docs/json-schema-conversion/>OpenAPI to JSON Schema conversion</a></li>
<li><a href=http://kubeconform.mandragor.org/docs/usage-as-github-action/>GitHub Action</a></li>
<li><a href=http://kubeconform.mandragor.org/docs/usage-as-github-action/>Github Action</a></li>
<li><a href=http://kubeconform.mandragor.org/docs/using-as-a-go-module/>Kubeconform as a Go module</a></li>
</ul>
<div id=main>

View file

@ -8,7 +8,7 @@
<div id=main-container><div id=header>
<ul id=navigation>
<li><a href=/about>About</a></li>
<li><a href=https://github.com/yannh/kubeconform/>GitHub</a></li>
<li><a href=https://github.com/yannh/kubeconform/>Github</a></li>
<li><a href=/docs/installation/>Docs</a></li>
<li><a href=/>Home</a></li>
</ul>
@ -21,12 +21,12 @@
<li><a href=http://kubeconform.mandragor.org/docs/usage/>Usage</a></li>
<li><a href=http://kubeconform.mandragor.org/docs/crd-support/>Custom Resources support</a></li>
<li><a href=http://kubeconform.mandragor.org/docs/json-schema-conversion/>OpenAPI to JSON Schema conversion</a></li>
<li><a href=http://kubeconform.mandragor.org/docs/usage-as-github-action/>GitHub Action</a></li>
<li><a href=http://kubeconform.mandragor.org/docs/usage-as-github-action/>Github Action</a></li>
<li><a href=http://kubeconform.mandragor.org/docs/using-as-a-go-module/>Kubeconform as a Go module</a></li>
</ul>
<div id=main>
<div class=navig>
<a href=http://kubeconform.mandragor.org/docs/usage-as-github-action/ id=prev>&lt; GitHub Action</a>
<a href=http://kubeconform.mandragor.org/docs/usage-as-github-action/ id=prev>&lt; Github Action</a>
<a href=# id=prev></a>
</div>
<div id=content-text>
@ -37,7 +37,7 @@ An example of usage can be found in <a href=https://github.com/yannh/kubeconform
<p>Additional documentation on <a href=https://pkg.go.dev/github.com/yannh/kubeconform/pkg/validator>pkg.go.dev</a></p>
</div>
<div class=navig>
<a href=http://kubeconform.mandragor.org/docs/usage-as-github-action/ id=prev>&lt; GitHub Action</a>
<a href=http://kubeconform.mandragor.org/docs/usage-as-github-action/ id=prev>&lt; Github Action</a>
<a href=# id=prev></a>
</div>
<script defer src=/js/prism.js></script>

View file

@ -9,7 +9,7 @@
<div id=main-container><div id=header>
<ul id=navigation>
<li><a href=/about>About</a></li>
<li><a href=https://github.com/yannh/kubeconform/>GitHub</a></li>
<li><a href=https://github.com/yannh/kubeconform/>Github</a></li>
<li><a href=/docs/installation/>Docs</a></li>
<li><a href=/>Home</a></li>
</ul>

View file

@ -5,8 +5,8 @@ For example, for Linux on x86_64 architecture:
curl -L https://github.com/yannh/kubeconform/releases/latest/download/kubeconform-linux-amd64.tar.gz | tar xvzf - &amp;&amp; \ sudo mv kubeconform /usr/local/bin/ MacOs Kubeconform is available to install using Homebrew: $ brew install kubeconform
Windows Download the latest release from our release page.</description></item><item><title>Usage</title><link>http://kubeconform.mandragor.org/docs/usage/</link><pubDate>Fri, 02 Jul 2021 00:00:00 +0000</pubDate><guid>http://kubeconform.mandragor.org/docs/usage/</guid><description>$ ./bin/kubeconform -h Usage: ./bin/kubeconform [OPTION]... [FILE OR FOLDER]... -cache string cache schemas downloaded via HTTP to this folder -cpu-prof string debug - log CPU profiling to file -exit-on-error immediately stop execution when the first error is encountered -h show help information -ignore-filename-pattern value regular expression specifying paths to ignore (can be specified multiple times) -ignore-missing-schemas skip files with missing schemas instead of failing -insecure-skip-tls-verify disable verification of the server's SSL certificate.</description></item><item><title>Custom Resources support</title><link>http://kubeconform.mandragor.org/docs/crd-support/</link><pubDate>Fri, 02 Jul 2021 00:00:00 +0000</pubDate><guid>http://kubeconform.mandragor.org/docs/crd-support/</guid><description>When the -schema-location parameter is not used, or set to &amp;ldquo;default&amp;rdquo;, kubeconform will default to downloading schemas from https://github.com/yannh/kubernetes-json-schema. Kubeconform however supports passing one, or multiple, schemas locations - HTTP(s) URLs, or local filesystem paths, in which case it will lookup for schema definitions in each of them, in order, stopping as soon as a matching file is found.
If the -schema-location value does not end with &amp;lsquo;.json&amp;rsquo;, Kubeconform will assume filenames / a file structure identical to that of kubernetesjsonschema.</description></item><item><title>OpenAPI to JSON Schema conversion</title><link>http://kubeconform.mandragor.org/docs/json-schema-conversion/</link><pubDate>Fri, 02 Jul 2021 00:00:00 +0000</pubDate><guid>http://kubeconform.mandragor.org/docs/json-schema-conversion/</guid><description>Kubeconform uses JSON schemas to validate Kubernetes resources. For custom resources, the CustomResourceDefinition first needs to be converted to JSON Schema. A script is provided to convert these CustomResourceDefinitions to JSON schema. Here is an example how to use it:
#!/bin/bash $ ./scripts/openapi2jsonschema.py https://raw.githubusercontent.com/aws/amazon-sagemaker-operator-for-k8s/master/config/crd/bases/sagemaker.aws.amazon.com_trainingjobs.yaml JSON schema written to trainingjob_v1.json The FILENAME_FORMAT environment variable can be used to change the output file name (Available variables: kind, group, version) (Default: {kind}_{version}).</description></item><item><title>GitHub Action</title><link>http://kubeconform.mandragor.org/docs/usage-as-github-action/</link><pubDate>Fri, 02 Jul 2021 00:00:00 +0000</pubDate><guid>http://kubeconform.mandragor.org/docs/usage-as-github-action/</guid><description>Kubeconform is publishes Docker Images to GitHub&amp;rsquo;s new Container Registry, ghcr.io. These images can be used directly in a GitHub Action, once logged in using a GitHub Token.
name: kubeconform on: push jobs: kubeconform: runs-on: ubuntu-latest steps: - name: login to GitHub Packages run: echo "${{ github.token }}" | docker login https://ghcr.io -u ${GITHUB_ACTOR} --password-stdin - uses: actions/checkout@v2 - uses: docker://ghcr.io/yannh/kubeconform:master with: entrypoint: '/kubeconform' args: "-summary -output json kubeconfigs/" Note on pricing: Kubeconform relies on GitHub Container Registry which is currently in Beta.</description></item><item><title>Kubeconform as a Go module</title><link>http://kubeconform.mandragor.org/docs/using-as-a-go-module/</link><pubDate>Fri, 02 Jul 2021 00:00:00 +0000</pubDate><guid>http://kubeconform.mandragor.org/docs/using-as-a-go-module/</guid><description>Warning: This is a work-in-progress, the interface is not yet considered stable. Feedback is encouraged.
#!/bin/bash $ ./scripts/openapi2jsonschema.py https://raw.githubusercontent.com/aws/amazon-sagemaker-operator-for-k8s/master/config/crd/bases/sagemaker.aws.amazon.com_trainingjobs.yaml JSON schema written to trainingjob_v1.json The FILENAME_FORMAT environment variable can be used to change the output file name (Available variables: kind, group, version) (Default: {kind}_{version}).</description></item><item><title>Github Action</title><link>http://kubeconform.mandragor.org/docs/usage-as-github-action/</link><pubDate>Fri, 02 Jul 2021 00:00:00 +0000</pubDate><guid>http://kubeconform.mandragor.org/docs/usage-as-github-action/</guid><description>Kubeconform is publishes Docker Images to Github&amp;rsquo;s new Container Registry, ghcr.io. These images can be used directly in a Github Action, once logged in using a Github Token.
name: kubeconform on: push jobs: kubeconform: runs-on: ubuntu-latest steps: - name: login to Github Packages run: echo "${{ github.token }}" | docker login https://ghcr.io -u ${GITHUB_ACTOR} --password-stdin - uses: actions/checkout@v2 - uses: docker://ghcr.io/yannh/kubeconform:master with: entrypoint: '/kubeconform' args: "-summary -output json kubeconfigs/" Note on pricing: Kubeconform relies on Github Container Registry which is currently in Beta.</description></item><item><title>Kubeconform as a Go module</title><link>http://kubeconform.mandragor.org/docs/using-as-a-go-module/</link><pubDate>Fri, 02 Jul 2021 00:00:00 +0000</pubDate><guid>http://kubeconform.mandragor.org/docs/using-as-a-go-module/</guid><description>Warning: This is a work-in-progress, the interface is not yet considered stable. Feedback is encouraged.
Kubeconform contains a package that can be used as a library. An example of usage can be found in examples/main.go
Additional documentation on pkg.go.dev</description></item><item><title>About</title><link>http://kubeconform.mandragor.org/about/</link><pubDate>Fri, 02 Jul 2021 00:00:00 +0000</pubDate><guid>http://kubeconform.mandragor.org/about/</guid><description>Kubeconform is a Kubernetes manifests validation tool. Build it into your CI to validate your Kubernetes configuration!
It is inspired by, contains code from and is designed to stay close to Kubeval, but with the following improvements:

View file

@ -25,7 +25,7 @@
</div>
</div><div id="footer">
<h3>GitHub</h3>
<h3>Github</h3>
</div>
</body>
</html>

View file

@ -5,8 +5,8 @@ For example, for Linux on x86_64 architecture:
curl -L https://github.com/yannh/kubeconform/releases/latest/download/kubeconform-linux-amd64.tar.gz | tar xvzf - &amp;&amp; \ sudo mv kubeconform /usr/local/bin/ MacOs Kubeconform is available to install using Homebrew: $ brew install kubeconform
Windows Download the latest release from our release page.</description></item><item><title>Usage</title><link>http://kubeconform.mandragor.org/docs/usage/</link><pubDate>Fri, 02 Jul 2021 00:00:00 +0000</pubDate><guid>http://kubeconform.mandragor.org/docs/usage/</guid><description>$ ./bin/kubeconform -h Usage: ./bin/kubeconform [OPTION]... [FILE OR FOLDER]... -cache string cache schemas downloaded via HTTP to this folder -cpu-prof string debug - log CPU profiling to file -exit-on-error immediately stop execution when the first error is encountered -h show help information -ignore-filename-pattern value regular expression specifying paths to ignore (can be specified multiple times) -ignore-missing-schemas skip files with missing schemas instead of failing -insecure-skip-tls-verify disable verification of the server's SSL certificate.</description></item><item><title>Custom Resources support</title><link>http://kubeconform.mandragor.org/docs/crd-support/</link><pubDate>Fri, 02 Jul 2021 00:00:00 +0000</pubDate><guid>http://kubeconform.mandragor.org/docs/crd-support/</guid><description>When the -schema-location parameter is not used, or set to &amp;ldquo;default&amp;rdquo;, kubeconform will default to downloading schemas from https://github.com/yannh/kubernetes-json-schema. Kubeconform however supports passing one, or multiple, schemas locations - HTTP(s) URLs, or local filesystem paths, in which case it will lookup for schema definitions in each of them, in order, stopping as soon as a matching file is found.
If the -schema-location value does not end with &amp;lsquo;.json&amp;rsquo;, Kubeconform will assume filenames / a file structure identical to that of kubernetesjsonschema.</description></item><item><title>OpenAPI to JSON Schema conversion</title><link>http://kubeconform.mandragor.org/docs/json-schema-conversion/</link><pubDate>Fri, 02 Jul 2021 00:00:00 +0000</pubDate><guid>http://kubeconform.mandragor.org/docs/json-schema-conversion/</guid><description>Kubeconform uses JSON schemas to validate Kubernetes resources. For custom resources, the CustomResourceDefinition first needs to be converted to JSON Schema. A script is provided to convert these CustomResourceDefinitions to JSON schema. Here is an example how to use it:
#!/bin/bash $ ./scripts/openapi2jsonschema.py https://raw.githubusercontent.com/aws/amazon-sagemaker-operator-for-k8s/master/config/crd/bases/sagemaker.aws.amazon.com_trainingjobs.yaml JSON schema written to trainingjob_v1.json The FILENAME_FORMAT environment variable can be used to change the output file name (Available variables: kind, group, version) (Default: {kind}_{version}).</description></item><item><title>GitHub Action</title><link>http://kubeconform.mandragor.org/docs/usage-as-github-action/</link><pubDate>Fri, 02 Jul 2021 00:00:00 +0000</pubDate><guid>http://kubeconform.mandragor.org/docs/usage-as-github-action/</guid><description>Kubeconform is publishes Docker Images to GitHub&amp;rsquo;s new Container Registry, ghcr.io. These images can be used directly in a GitHub Action, once logged in using a GitHub Token.
name: kubeconform on: push jobs: kubeconform: runs-on: ubuntu-latest steps: - name: login to GitHub Packages run: echo "${{ github.token }}" | docker login https://ghcr.io -u ${GITHUB_ACTOR} --password-stdin - uses: actions/checkout@v2 - uses: docker://ghcr.io/yannh/kubeconform:master with: entrypoint: '/kubeconform' args: "-summary -output json kubeconfigs/" Note on pricing: Kubeconform relies on GitHub Container Registry which is currently in Beta.</description></item><item><title>Kubeconform as a Go module</title><link>http://kubeconform.mandragor.org/docs/using-as-a-go-module/</link><pubDate>Fri, 02 Jul 2021 00:00:00 +0000</pubDate><guid>http://kubeconform.mandragor.org/docs/using-as-a-go-module/</guid><description>Warning: This is a work-in-progress, the interface is not yet considered stable. Feedback is encouraged.
#!/bin/bash $ ./scripts/openapi2jsonschema.py https://raw.githubusercontent.com/aws/amazon-sagemaker-operator-for-k8s/master/config/crd/bases/sagemaker.aws.amazon.com_trainingjobs.yaml JSON schema written to trainingjob_v1.json The FILENAME_FORMAT environment variable can be used to change the output file name (Available variables: kind, group, version) (Default: {kind}_{version}).</description></item><item><title>Github Action</title><link>http://kubeconform.mandragor.org/docs/usage-as-github-action/</link><pubDate>Fri, 02 Jul 2021 00:00:00 +0000</pubDate><guid>http://kubeconform.mandragor.org/docs/usage-as-github-action/</guid><description>Kubeconform is publishes Docker Images to Github&amp;rsquo;s new Container Registry, ghcr.io. These images can be used directly in a Github Action, once logged in using a Github Token.
name: kubeconform on: push jobs: kubeconform: runs-on: ubuntu-latest steps: - name: login to Github Packages run: echo "${{ github.token }}" | docker login https://ghcr.io -u ${GITHUB_ACTOR} --password-stdin - uses: actions/checkout@v2 - uses: docker://ghcr.io/yannh/kubeconform:master with: entrypoint: '/kubeconform' args: "-summary -output json kubeconfigs/" Note on pricing: Kubeconform relies on Github Container Registry which is currently in Beta.</description></item><item><title>Kubeconform as a Go module</title><link>http://kubeconform.mandragor.org/docs/using-as-a-go-module/</link><pubDate>Fri, 02 Jul 2021 00:00:00 +0000</pubDate><guid>http://kubeconform.mandragor.org/docs/using-as-a-go-module/</guid><description>Warning: This is a work-in-progress, the interface is not yet considered stable. Feedback is encouraged.
Kubeconform contains a package that can be used as a library. An example of usage can be found in examples/main.go
Additional documentation on pkg.go.dev</description></item><item><title>About</title><link>http://kubeconform.mandragor.org/about/</link><pubDate>Fri, 02 Jul 2021 00:00:00 +0000</pubDate><guid>http://kubeconform.mandragor.org/about/</guid><description>Kubeconform is a Kubernetes manifests validation tool. Build it into your CI to validate your Kubernetes configuration!
It is inspired by, contains code from and is designed to stay close to Kubeval, but with the following improvements:

View file

@ -1,6 +1,6 @@
<?xml version="1.0" encoding="utf-8" standalone="yes"?><rss version="2.0" xmlns:atom="http://www.w3.org/2005/Atom"><channel><title>Usage on Kubeconform - Fast Kubernetes manifests validation!</title><link>http://kubeconform.mandragor.org/tags/usage/</link><description>Recent content in Usage on Kubeconform - Fast Kubernetes manifests validation!</description><generator>Hugo -- gohugo.io</generator><language>en-us</language><lastBuildDate>Fri, 02 Jul 2021 00:00:00 +0000</lastBuildDate><atom:link href="http://kubeconform.mandragor.org/tags/usage/index.xml" rel="self" type="application/rss+xml"/><item><title>Usage</title><link>http://kubeconform.mandragor.org/docs/usage/</link><pubDate>Fri, 02 Jul 2021 00:00:00 +0000</pubDate><guid>http://kubeconform.mandragor.org/docs/usage/</guid><description>$ ./bin/kubeconform -h Usage: ./bin/kubeconform [OPTION]... [FILE OR FOLDER]... -cache string cache schemas downloaded via HTTP to this folder -cpu-prof string debug - log CPU profiling to file -exit-on-error immediately stop execution when the first error is encountered -h show help information -ignore-filename-pattern value regular expression specifying paths to ignore (can be specified multiple times) -ignore-missing-schemas skip files with missing schemas instead of failing -insecure-skip-tls-verify disable verification of the server's SSL certificate.</description></item><item><title>Custom Resources support</title><link>http://kubeconform.mandragor.org/docs/crd-support/</link><pubDate>Fri, 02 Jul 2021 00:00:00 +0000</pubDate><guid>http://kubeconform.mandragor.org/docs/crd-support/</guid><description>When the -schema-location parameter is not used, or set to &amp;ldquo;default&amp;rdquo;, kubeconform will default to downloading schemas from https://github.com/yannh/kubernetes-json-schema. 
Kubeconform however supports passing one, or multiple, schemas locations - HTTP(s) URLs, or local filesystem paths, in which case it will lookup for schema definitions in each of them, in order, stopping as soon as a matching file is found.
If the -schema-location value does not end with &amp;lsquo;.json&amp;rsquo;, Kubeconform will assume filenames / a file structure identical to that of kubernetesjsonschema.</description></item><item><title>OpenAPI to JSON Schema conversion</title><link>http://kubeconform.mandragor.org/docs/json-schema-conversion/</link><pubDate>Fri, 02 Jul 2021 00:00:00 +0000</pubDate><guid>http://kubeconform.mandragor.org/docs/json-schema-conversion/</guid><description>Kubeconform uses JSON schemas to validate Kubernetes resources. For custom resources, the CustomResourceDefinition first needs to be converted to JSON Schema. A script is provided to convert these CustomResourceDefinitions to JSON schema. Here is an example how to use it:
#!/bin/bash $ ./scripts/openapi2jsonschema.py https://raw.githubusercontent.com/aws/amazon-sagemaker-operator-for-k8s/master/config/crd/bases/sagemaker.aws.amazon.com_trainingjobs.yaml JSON schema written to trainingjob_v1.json The FILENAME_FORMAT environment variable can be used to change the output file name (Available variables: kind, group, version) (Default: {kind}_{version}).</description></item><item><title>GitHub Action</title><link>http://kubeconform.mandragor.org/docs/usage-as-github-action/</link><pubDate>Fri, 02 Jul 2021 00:00:00 +0000</pubDate><guid>http://kubeconform.mandragor.org/docs/usage-as-github-action/</guid><description>Kubeconform is publishes Docker Images to GitHub&amp;rsquo;s new Container Registry, ghcr.io. These images can be used directly in a GitHub Action, once logged in using a GitHub Token.
name: kubeconform on: push jobs: kubeconform: runs-on: ubuntu-latest steps: - name: login to GitHub Packages run: echo "${{ github.token }}" | docker login https://ghcr.io -u ${GITHUB_ACTOR} --password-stdin - uses: actions/checkout@v2 - uses: docker://ghcr.io/yannh/kubeconform:master with: entrypoint: '/kubeconform' args: "-summary -output json kubeconfigs/" Note on pricing: Kubeconform relies on GitHub Container Registry which is currently in Beta.</description></item><item><title>Kubeconform as a Go module</title><link>http://kubeconform.mandragor.org/docs/using-as-a-go-module/</link><pubDate>Fri, 02 Jul 2021 00:00:00 +0000</pubDate><guid>http://kubeconform.mandragor.org/docs/using-as-a-go-module/</guid><description>Warning: This is a work-in-progress, the interface is not yet considered stable. Feedback is encouraged.
#!/bin/bash $ ./scripts/openapi2jsonschema.py https://raw.githubusercontent.com/aws/amazon-sagemaker-operator-for-k8s/master/config/crd/bases/sagemaker.aws.amazon.com_trainingjobs.yaml JSON schema written to trainingjob_v1.json The FILENAME_FORMAT environment variable can be used to change the output file name (Available variables: kind, group, version) (Default: {kind}_{version}).</description></item><item><title>Github Action</title><link>http://kubeconform.mandragor.org/docs/usage-as-github-action/</link><pubDate>Fri, 02 Jul 2021 00:00:00 +0000</pubDate><guid>http://kubeconform.mandragor.org/docs/usage-as-github-action/</guid><description>Kubeconform is publishes Docker Images to Github&amp;rsquo;s new Container Registry, ghcr.io. These images can be used directly in a Github Action, once logged in using a Github Token.
name: kubeconform on: push jobs: kubeconform: runs-on: ubuntu-latest steps: - name: login to Github Packages run: echo "${{ github.token }}" | docker login https://ghcr.io -u ${GITHUB_ACTOR} --password-stdin - uses: actions/checkout@v2 - uses: docker://ghcr.io/yannh/kubeconform:master with: entrypoint: '/kubeconform' args: "-summary -output json kubeconfigs/" Note on pricing: Kubeconform relies on Github Container Registry which is currently in Beta.</description></item><item><title>Kubeconform as a Go module</title><link>http://kubeconform.mandragor.org/docs/using-as-a-go-module/</link><pubDate>Fri, 02 Jul 2021 00:00:00 +0000</pubDate><guid>http://kubeconform.mandragor.org/docs/using-as-a-go-module/</guid><description>Warning: This is a work-in-progress, the interface is not yet considered stable. Feedback is encouraged.
Kubeconform contains a package that can be used as a library. An example of usage can be found in examples/main.go
Additional documentation on pkg.go.dev</description></item></channel></rss>

View file

@ -1,7 +1,7 @@
<div id="header">
<ul id="navigation">
<li><a href="/about">About</a></li>
<li><a href="https://github.com/yannh/kubeconform/">GitHub</a></li>
<li><a href="https://github.com/yannh/kubeconform/">Github</a></li>
<li><a href="/docs/installation/">Docs</a></li>
<li><a href="/">Home</a></li>
</ul>

View file

@ -1,363 +0,0 @@
Mozilla Public License, version 2.0
1. Definitions
1.1. "Contributor"
means each individual or legal entity that creates, contributes to the
creation of, or owns Covered Software.
1.2. "Contributor Version"
means the combination of the Contributions of others (if any) used by a
Contributor and that particular Contributor's Contribution.
1.3. "Contribution"
means Covered Software of a particular Contributor.
1.4. "Covered Software"
means Source Code Form to which the initial Contributor has attached the
notice in Exhibit A, the Executable Form of such Source Code Form, and
Modifications of such Source Code Form, in each case including portions
thereof.
1.5. "Incompatible With Secondary Licenses"
means
a. that the initial Contributor has attached the notice described in
Exhibit B to the Covered Software; or
b. that the Covered Software was made available under the terms of
version 1.1 or earlier of the License, but not also under the terms of
a Secondary License.
1.6. "Executable Form"
means any form of the work other than Source Code Form.
1.7. "Larger Work"
means a work that combines Covered Software with other material, in a
separate file or files, that is not Covered Software.
1.8. "License"
means this document.
1.9. "Licensable"
means having the right to grant, to the maximum extent possible, whether
at the time of the initial grant or subsequently, any and all of the
rights conveyed by this License.
1.10. "Modifications"
means any of the following:
a. any file in Source Code Form that results from an addition to,
deletion from, or modification of the contents of Covered Software; or
b. any new file in Source Code Form that contains any Covered Software.
1.11. "Patent Claims" of a Contributor
means any patent claim(s), including without limitation, method,
process, and apparatus claims, in any patent Licensable by such
Contributor that would be infringed, but for the grant of the License,
by the making, using, selling, offering for sale, having made, import,
or transfer of either its Contributions or its Contributor Version.
1.12. "Secondary License"
means either the GNU General Public License, Version 2.0, the GNU Lesser
General Public License, Version 2.1, the GNU Affero General Public
License, Version 3.0, or any later versions of those licenses.
1.13. "Source Code Form"
means the form of the work preferred for making modifications.
1.14. "You" (or "Your")
means an individual or a legal entity exercising rights under this
License. For legal entities, "You" includes any entity that controls, is
controlled by, or is under common control with You. For purposes of this
definition, "control" means (a) the power, direct or indirect, to cause
the direction or management of such entity, whether by contract or
otherwise, or (b) ownership of more than fifty percent (50%) of the
outstanding shares or beneficial ownership of such entity.
2. License Grants and Conditions
2.1. Grants
Each Contributor hereby grants You a world-wide, royalty-free,
non-exclusive license:
a. under intellectual property rights (other than patent or trademark)
Licensable by such Contributor to use, reproduce, make available,
modify, display, perform, distribute, and otherwise exploit its
Contributions, either on an unmodified basis, with Modifications, or
as part of a Larger Work; and
b. under Patent Claims of such Contributor to make, use, sell, offer for
sale, have made, import, and otherwise transfer either its
Contributions or its Contributor Version.
2.2. Effective Date
The licenses granted in Section 2.1 with respect to any Contribution
become effective for each Contribution on the date the Contributor first
distributes such Contribution.
2.3. Limitations on Grant Scope
The licenses granted in this Section 2 are the only rights granted under
this License. No additional rights or licenses will be implied from the
distribution or licensing of Covered Software under this License.
Notwithstanding Section 2.1(b) above, no patent license is granted by a
Contributor:
a. for any code that a Contributor has removed from Covered Software; or
b. for infringements caused by: (i) Your and any other third party's
modifications of Covered Software, or (ii) the combination of its
Contributions with other software (except as part of its Contributor
Version); or
c. under Patent Claims infringed by Covered Software in the absence of
its Contributions.
This License does not grant any rights in the trademarks, service marks,
or logos of any Contributor (except as may be necessary to comply with
the notice requirements in Section 3.4).
2.4. Subsequent Licenses
No Contributor makes additional grants as a result of Your choice to
distribute the Covered Software under a subsequent version of this
License (see Section 10.2) or under the terms of a Secondary License (if
permitted under the terms of Section 3.3).
2.5. Representation
Each Contributor represents that the Contributor believes its
Contributions are its original creation(s) or it has sufficient rights to
grant the rights to its Contributions conveyed by this License.
2.6. Fair Use
This License is not intended to limit any rights You have under
applicable copyright doctrines of fair use, fair dealing, or other
equivalents.
2.7. Conditions
Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
Section 2.1.
3. Responsibilities
3.1. Distribution of Source Form
All distribution of Covered Software in Source Code Form, including any
Modifications that You create or to which You contribute, must be under
the terms of this License. You must inform recipients that the Source
Code Form of the Covered Software is governed by the terms of this
License, and how they can obtain a copy of this License. You may not
attempt to alter or restrict the recipients' rights in the Source Code
Form.
3.2. Distribution of Executable Form
If You distribute Covered Software in Executable Form then:
a. such Covered Software must also be made available in Source Code Form,
as described in Section 3.1, and You must inform recipients of the
Executable Form how they can obtain a copy of such Source Code Form by
reasonable means in a timely manner, at a charge no more than the cost
of distribution to the recipient; and
b. You may distribute such Executable Form under the terms of this
License, or sublicense it under different terms, provided that the
license for the Executable Form does not attempt to limit or alter the
recipients' rights in the Source Code Form under this License.
3.3. Distribution of a Larger Work
You may create and distribute a Larger Work under terms of Your choice,
provided that You also comply with the requirements of this License for
the Covered Software. If the Larger Work is a combination of Covered
Software with a work governed by one or more Secondary Licenses, and the
Covered Software is not Incompatible With Secondary Licenses, this
License permits You to additionally distribute such Covered Software
under the terms of such Secondary License(s), so that the recipient of
the Larger Work may, at their option, further distribute the Covered
Software under the terms of either this License or such Secondary
License(s).
3.4. Notices
You may not remove or alter the substance of any license notices
(including copyright notices, patent notices, disclaimers of warranty, or
limitations of liability) contained within the Source Code Form of the
Covered Software, except that You may alter any license notices to the
extent required to remedy known factual inaccuracies.
3.5. Application of Additional Terms
You may choose to offer, and to charge a fee for, warranty, support,
indemnity or liability obligations to one or more recipients of Covered
Software. However, You may do so only on Your own behalf, and not on
behalf of any Contributor. You must make it absolutely clear that any
such warranty, support, indemnity, or liability obligation is offered by
You alone, and You hereby agree to indemnify every Contributor for any
liability incurred by such Contributor as a result of warranty, support,
indemnity or liability terms You offer. You may include additional
disclaimers of warranty and limitations of liability specific to any
jurisdiction.
4. Inability to Comply Due to Statute or Regulation
If it is impossible for You to comply with any of the terms of this License
with respect to some or all of the Covered Software due to statute,
judicial order, or regulation then You must: (a) comply with the terms of
this License to the maximum extent possible; and (b) describe the
limitations and the code they affect. Such description must be placed in a
text file included with all distributions of the Covered Software under
this License. Except to the extent prohibited by statute or regulation,
such description must be sufficiently detailed for a recipient of ordinary
skill to be able to understand it.
5. Termination
5.1. The rights granted under this License will terminate automatically if You
fail to comply with any of its terms. However, if You become compliant,
then the rights granted under this License from a particular Contributor
are reinstated (a) provisionally, unless and until such Contributor
explicitly and finally terminates Your grants, and (b) on an ongoing
basis, if such Contributor fails to notify You of the non-compliance by
some reasonable means prior to 60 days after You have come back into
compliance. Moreover, Your grants from a particular Contributor are
reinstated on an ongoing basis if such Contributor notifies You of the
non-compliance by some reasonable means, this is the first time You have
received notice of non-compliance with this License from such
Contributor, and You become compliant prior to 30 days after Your receipt
of the notice.
5.2. If You initiate litigation against any entity by asserting a patent
infringement claim (excluding declaratory judgment actions,
counter-claims, and cross-claims) alleging that a Contributor Version
directly or indirectly infringes any patent, then the rights granted to
You by any and all Contributors for the Covered Software under Section
2.1 of this License shall terminate.
5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
license agreements (excluding distributors and resellers) which have been
validly granted by You or Your distributors under this License prior to
termination shall survive termination.
6. Disclaimer of Warranty
Covered Software is provided under this License on an "as is" basis,
without warranty of any kind, either expressed, implied, or statutory,
including, without limitation, warranties that the Covered Software is free
of defects, merchantable, fit for a particular purpose or non-infringing.
The entire risk as to the quality and performance of the Covered Software
is with You. Should any Covered Software prove defective in any respect,
You (not any Contributor) assume the cost of any necessary servicing,
repair, or correction. This disclaimer of warranty constitutes an essential
part of this License. No use of any Covered Software is authorized under
this License except under this disclaimer.
7. Limitation of Liability
Under no circumstances and under no legal theory, whether tort (including
negligence), contract, or otherwise, shall any Contributor, or anyone who
distributes Covered Software as permitted above, be liable to You for any
direct, indirect, special, incidental, or consequential damages of any
character including, without limitation, damages for lost profits, loss of
goodwill, work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses, even if such party shall have been
informed of the possibility of such damages. This limitation of liability
shall not apply to liability for death or personal injury resulting from
such party's negligence to the extent applicable law prohibits such
limitation. Some jurisdictions do not allow the exclusion or limitation of
incidental or consequential damages, so this exclusion and limitation may
not apply to You.
8. Litigation
Any litigation relating to this License may be brought only in the courts
of a jurisdiction where the defendant maintains its principal place of
business and such litigation shall be governed by laws of that
jurisdiction, without reference to its conflict-of-law provisions. Nothing
in this Section shall prevent a party's ability to bring cross-claims or
counter-claims.
9. Miscellaneous
This License represents the complete agreement concerning the subject
matter hereof. If any provision of this License is held to be
unenforceable, such provision shall be reformed only to the extent
necessary to make it enforceable. Any law or regulation which provides that
the language of a contract shall be construed against the drafter shall not
be used to construe this License against a Contributor.
10. Versions of the License
10.1. New Versions
Mozilla Foundation is the license steward. Except as provided in Section
10.3, no one other than the license steward has the right to modify or
publish new versions of this License. Each version will be given a
distinguishing version number.
10.2. Effect of New Versions
You may distribute the Covered Software under the terms of the version
of the License under which You originally received the Covered Software,
or under the terms of any subsequent version published by the license
steward.
10.3. Modified Versions
If you create software not governed by this License, and you want to
create a new license for such software, you may create and use a
modified version of this License if you rename the license and remove
any references to the name of the license steward (except to note that
such modified license differs from this License).
10.4. Distributing Source Code Form that is Incompatible With Secondary
Licenses If You choose to distribute Source Code Form that is
Incompatible With Secondary Licenses under the terms of this version of
the License, the notice described in Exhibit B of this License must be
attached.
Exhibit A - Source Code Form License Notice
This Source Code Form is subject to the
terms of the Mozilla Public License, v.
2.0. If a copy of the MPL was not
distributed with this file, You can
obtain one at
http://mozilla.org/MPL/2.0/.
If it is not possible or desirable to put the notice in a particular file,
then You may include the notice in a location (such as a LICENSE file in a
relevant directory) where a recipient would be likely to look for such a
notice.
You may add additional accurate notices of copyright ownership.
Exhibit B - "Incompatible With Secondary Licenses" Notice
This Source Code Form is "Incompatible
With Secondary Licenses", as defined by
the Mozilla Public License, v. 2.0.

View file

@ -1,30 +0,0 @@
# cleanhttp
Functions for accessing "clean" Go http.Client values
-------------
The Go standard library contains a default `http.Client` called
`http.DefaultClient`. It is a common idiom in Go code to start with
`http.DefaultClient` and tweak it as necessary, and in fact, this is
encouraged; from the `http` package documentation:
> The Client's Transport typically has internal state (cached TCP connections),
so Clients should be reused instead of created as needed. Clients are safe for
concurrent use by multiple goroutines.
Unfortunately, this is a shared value, and it is not uncommon for libraries to
assume that they are free to modify it at will. With enough dependencies, it
can be very easy to encounter strange problems and race conditions due to
manipulation of this shared value across libraries and goroutines (clients are
safe for concurrent use, but writing values to the client struct itself is not
protected).
Making things worse is the fact that a bare `http.Client` will use a default
`http.Transport` called `http.DefaultTransport`, which is another global value
that behaves the same way. So it is not simply enough to replace
`http.DefaultClient` with `&http.Client{}`.
This repository provides some simple functions to get a "clean" `http.Client`
-- one that uses the same default values as the Go standard library, but
returns a client that does not share any state with other clients.

View file

@ -1,58 +0,0 @@
package cleanhttp
import (
"net"
"net/http"
"runtime"
"time"
)
// DefaultTransport returns a new http.Transport with similar default values to
// http.DefaultTransport, but with idle connections and keepalives disabled.
func DefaultTransport() *http.Transport {
transport := DefaultPooledTransport()
transport.DisableKeepAlives = true
transport.MaxIdleConnsPerHost = -1
return transport
}
// DefaultPooledTransport returns a new http.Transport with similar default
// values to http.DefaultTransport. Do not use this for transient transports as
// it can leak file descriptors over time. Only use this for transports that
// will be re-used for the same host(s).
func DefaultPooledTransport() *http.Transport {
transport := &http.Transport{
Proxy: http.ProxyFromEnvironment,
DialContext: (&net.Dialer{
Timeout: 30 * time.Second,
KeepAlive: 30 * time.Second,
DualStack: true,
}).DialContext,
MaxIdleConns: 100,
IdleConnTimeout: 90 * time.Second,
TLSHandshakeTimeout: 10 * time.Second,
ExpectContinueTimeout: 1 * time.Second,
ForceAttemptHTTP2: true,
MaxIdleConnsPerHost: runtime.GOMAXPROCS(0) + 1,
}
return transport
}
// DefaultClient returns a new http.Client with similar default values to
// http.Client, but with a non-shared Transport, idle connections disabled, and
// keepalives disabled.
func DefaultClient() *http.Client {
return &http.Client{
Transport: DefaultTransport(),
}
}
// DefaultPooledClient returns a new http.Client with similar default values to
// http.Client, but with a shared Transport. Do not use this function for
// transient clients as it can leak file descriptors over time. Only use this
// for clients that will be re-used for the same host(s).
func DefaultPooledClient() *http.Client {
return &http.Client{
Transport: DefaultPooledTransport(),
}
}

View file

@ -1,20 +0,0 @@
// Package cleanhttp offers convenience utilities for acquiring "clean"
// http.Transport and http.Client structs.
//
// Values set on http.DefaultClient and http.DefaultTransport affect all
// callers. This can have detrimental effects, esepcially in TLS contexts,
// where client or root certificates set to talk to multiple endpoints can end
// up displacing each other, leading to hard-to-debug issues. This package
// provides non-shared http.Client and http.Transport structs to ensure that
// the configuration will not be overwritten by other parts of the application
// or dependencies.
//
// The DefaultClient and DefaultTransport functions disable idle connections
// and keepalives. Without ensuring that idle connections are closed before
// garbage collection, short-term clients/transports can leak file descriptors,
// eventually leading to "too many open files" errors. If you will be
// connecting to the same hosts repeatedly from the same client, you can use
// DefaultPooledClient to receive a client that has connection pooling
// semantics similar to http.DefaultClient.
//
package cleanhttp

View file

@ -1,48 +0,0 @@
package cleanhttp
import (
"net/http"
"strings"
"unicode"
)
// HandlerInput provides input options to cleanhttp's handlers
type HandlerInput struct {
ErrStatus int
}
// PrintablePathCheckHandler is a middleware that ensures the request path
// contains only printable runes.
func PrintablePathCheckHandler(next http.Handler, input *HandlerInput) http.Handler {
// Nil-check on input to make it optional
if input == nil {
input = &HandlerInput{
ErrStatus: http.StatusBadRequest,
}
}
// Default to http.StatusBadRequest on error
if input.ErrStatus == 0 {
input.ErrStatus = http.StatusBadRequest
}
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if r != nil {
// Check URL path for non-printable characters
idx := strings.IndexFunc(r.URL.Path, func(c rune) bool {
return !unicode.IsPrint(c)
})
if idx != -1 {
w.WriteHeader(input.ErrStatus)
return
}
if next != nil {
next.ServeHTTP(w, r)
}
}
return
})
}

View file

@ -1,4 +0,0 @@
.idea/
*.iml
*.test
.vscode/

View file

@ -1 +0,0 @@
1.22.2

View file

@ -1,33 +0,0 @@
## 0.7.7 (May 30, 2024)
BUG FIXES:
- client: avoid potentially leaking URL-embedded basic authentication credentials in logs (#158)
## 0.7.6 (May 9, 2024)
ENHANCEMENTS:
- client: support a `RetryPrepare` function for modifying the request before retrying (#216)
- client: support HTTP-date values for `Retry-After` header value (#138)
- client: avoid reading entire body when the body is a `*bytes.Reader` (#197)
BUG FIXES:
- client: fix a broken check for invalid server certificate in go 1.20+ (#210)
## 0.7.5 (Nov 8, 2023)
BUG FIXES:
- client: fixes an issue where the request body is not preserved on temporary redirects or re-established HTTP/2 connections (#207)
## 0.7.4 (Jun 6, 2023)
BUG FIXES:
- client: fixing an issue where the Content-Type header wouldn't be sent with an empty payload when using HTTP/2 (#194)
## 0.7.3 (May 15, 2023)
Initial release

View file

@ -1 +0,0 @@
* @hashicorp/go-retryablehttp-maintainers

View file

@ -1,365 +0,0 @@
Copyright (c) 2015 HashiCorp, Inc.
Mozilla Public License, version 2.0
1. Definitions
1.1. "Contributor"
means each individual or legal entity that creates, contributes to the
creation of, or owns Covered Software.
1.2. "Contributor Version"
means the combination of the Contributions of others (if any) used by a
Contributor and that particular Contributor's Contribution.
1.3. "Contribution"
means Covered Software of a particular Contributor.
1.4. "Covered Software"
means Source Code Form to which the initial Contributor has attached the
notice in Exhibit A, the Executable Form of such Source Code Form, and
Modifications of such Source Code Form, in each case including portions
thereof.
1.5. "Incompatible With Secondary Licenses"
means
a. that the initial Contributor has attached the notice described in
Exhibit B to the Covered Software; or
b. that the Covered Software was made available under the terms of
version 1.1 or earlier of the License, but not also under the terms of
a Secondary License.
1.6. "Executable Form"
means any form of the work other than Source Code Form.
1.7. "Larger Work"
means a work that combines Covered Software with other material, in a
separate file or files, that is not Covered Software.
1.8. "License"
means this document.
1.9. "Licensable"
means having the right to grant, to the maximum extent possible, whether
at the time of the initial grant or subsequently, any and all of the
rights conveyed by this License.
1.10. "Modifications"
means any of the following:
a. any file in Source Code Form that results from an addition to,
deletion from, or modification of the contents of Covered Software; or
b. any new file in Source Code Form that contains any Covered Software.
1.11. "Patent Claims" of a Contributor
means any patent claim(s), including without limitation, method,
process, and apparatus claims, in any patent Licensable by such
Contributor that would be infringed, but for the grant of the License,
by the making, using, selling, offering for sale, having made, import,
or transfer of either its Contributions or its Contributor Version.
1.12. "Secondary License"
means either the GNU General Public License, Version 2.0, the GNU Lesser
General Public License, Version 2.1, the GNU Affero General Public
License, Version 3.0, or any later versions of those licenses.
1.13. "Source Code Form"
means the form of the work preferred for making modifications.
1.14. "You" (or "Your")
means an individual or a legal entity exercising rights under this
License. For legal entities, "You" includes any entity that controls, is
controlled by, or is under common control with You. For purposes of this
definition, "control" means (a) the power, direct or indirect, to cause
the direction or management of such entity, whether by contract or
otherwise, or (b) ownership of more than fifty percent (50%) of the
outstanding shares or beneficial ownership of such entity.
2. License Grants and Conditions
2.1. Grants
Each Contributor hereby grants You a world-wide, royalty-free,
non-exclusive license:
a. under intellectual property rights (other than patent or trademark)
Licensable by such Contributor to use, reproduce, make available,
modify, display, perform, distribute, and otherwise exploit its
Contributions, either on an unmodified basis, with Modifications, or
as part of a Larger Work; and
b. under Patent Claims of such Contributor to make, use, sell, offer for
sale, have made, import, and otherwise transfer either its
Contributions or its Contributor Version.
2.2. Effective Date
The licenses granted in Section 2.1 with respect to any Contribution
become effective for each Contribution on the date the Contributor first
distributes such Contribution.
2.3. Limitations on Grant Scope
The licenses granted in this Section 2 are the only rights granted under
this License. No additional rights or licenses will be implied from the
distribution or licensing of Covered Software under this License.
Notwithstanding Section 2.1(b) above, no patent license is granted by a
Contributor:
a. for any code that a Contributor has removed from Covered Software; or
b. for infringements caused by: (i) Your and any other third party's
modifications of Covered Software, or (ii) the combination of its
Contributions with other software (except as part of its Contributor
Version); or
c. under Patent Claims infringed by Covered Software in the absence of
its Contributions.
This License does not grant any rights in the trademarks, service marks,
or logos of any Contributor (except as may be necessary to comply with
the notice requirements in Section 3.4).
2.4. Subsequent Licenses
No Contributor makes additional grants as a result of Your choice to
distribute the Covered Software under a subsequent version of this
License (see Section 10.2) or under the terms of a Secondary License (if
permitted under the terms of Section 3.3).
2.5. Representation
Each Contributor represents that the Contributor believes its
Contributions are its original creation(s) or it has sufficient rights to
grant the rights to its Contributions conveyed by this License.
2.6. Fair Use
This License is not intended to limit any rights You have under
applicable copyright doctrines of fair use, fair dealing, or other
equivalents.
2.7. Conditions
Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
Section 2.1.
3. Responsibilities
3.1. Distribution of Source Form
All distribution of Covered Software in Source Code Form, including any
Modifications that You create or to which You contribute, must be under
the terms of this License. You must inform recipients that the Source
Code Form of the Covered Software is governed by the terms of this
License, and how they can obtain a copy of this License. You may not
attempt to alter or restrict the recipients' rights in the Source Code
Form.
3.2. Distribution of Executable Form
If You distribute Covered Software in Executable Form then:
a. such Covered Software must also be made available in Source Code Form,
as described in Section 3.1, and You must inform recipients of the
Executable Form how they can obtain a copy of such Source Code Form by
reasonable means in a timely manner, at a charge no more than the cost
of distribution to the recipient; and
b. You may distribute such Executable Form under the terms of this
License, or sublicense it under different terms, provided that the
license for the Executable Form does not attempt to limit or alter the
recipients' rights in the Source Code Form under this License.
3.3. Distribution of a Larger Work
You may create and distribute a Larger Work under terms of Your choice,
provided that You also comply with the requirements of this License for
the Covered Software. If the Larger Work is a combination of Covered
Software with a work governed by one or more Secondary Licenses, and the
Covered Software is not Incompatible With Secondary Licenses, this
License permits You to additionally distribute such Covered Software
under the terms of such Secondary License(s), so that the recipient of
the Larger Work may, at their option, further distribute the Covered
Software under the terms of either this License or such Secondary
License(s).
3.4. Notices
You may not remove or alter the substance of any license notices
(including copyright notices, patent notices, disclaimers of warranty, or
limitations of liability) contained within the Source Code Form of the
Covered Software, except that You may alter any license notices to the
extent required to remedy known factual inaccuracies.
3.5. Application of Additional Terms
You may choose to offer, and to charge a fee for, warranty, support,
indemnity or liability obligations to one or more recipients of Covered
Software. However, You may do so only on Your own behalf, and not on
behalf of any Contributor. You must make it absolutely clear that any
such warranty, support, indemnity, or liability obligation is offered by
You alone, and You hereby agree to indemnify every Contributor for any
liability incurred by such Contributor as a result of warranty, support,
indemnity or liability terms You offer. You may include additional
disclaimers of warranty and limitations of liability specific to any
jurisdiction.
4. Inability to Comply Due to Statute or Regulation
If it is impossible for You to comply with any of the terms of this License
with respect to some or all of the Covered Software due to statute,
judicial order, or regulation then You must: (a) comply with the terms of
this License to the maximum extent possible; and (b) describe the
limitations and the code they affect. Such description must be placed in a
text file included with all distributions of the Covered Software under
this License. Except to the extent prohibited by statute or regulation,
such description must be sufficiently detailed for a recipient of ordinary
skill to be able to understand it.
5. Termination
5.1. The rights granted under this License will terminate automatically if You
fail to comply with any of its terms. However, if You become compliant,
then the rights granted under this License from a particular Contributor
are reinstated (a) provisionally, unless and until such Contributor
explicitly and finally terminates Your grants, and (b) on an ongoing
basis, if such Contributor fails to notify You of the non-compliance by
some reasonable means prior to 60 days after You have come back into
compliance. Moreover, Your grants from a particular Contributor are
reinstated on an ongoing basis if such Contributor notifies You of the
non-compliance by some reasonable means, this is the first time You have
received notice of non-compliance with this License from such
Contributor, and You become compliant prior to 30 days after Your receipt
of the notice.
5.2. If You initiate litigation against any entity by asserting a patent
infringement claim (excluding declaratory judgment actions,
counter-claims, and cross-claims) alleging that a Contributor Version
directly or indirectly infringes any patent, then the rights granted to
You by any and all Contributors for the Covered Software under Section
2.1 of this License shall terminate.
5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
license agreements (excluding distributors and resellers) which have been
validly granted by You or Your distributors under this License prior to
termination shall survive termination.
6. Disclaimer of Warranty
Covered Software is provided under this License on an "as is" basis,
without warranty of any kind, either expressed, implied, or statutory,
including, without limitation, warranties that the Covered Software is free
of defects, merchantable, fit for a particular purpose or non-infringing.
The entire risk as to the quality and performance of the Covered Software
is with You. Should any Covered Software prove defective in any respect,
You (not any Contributor) assume the cost of any necessary servicing,
repair, or correction. This disclaimer of warranty constitutes an essential
part of this License. No use of any Covered Software is authorized under
this License except under this disclaimer.
7. Limitation of Liability
Under no circumstances and under no legal theory, whether tort (including
negligence), contract, or otherwise, shall any Contributor, or anyone who
distributes Covered Software as permitted above, be liable to You for any
direct, indirect, special, incidental, or consequential damages of any
character including, without limitation, damages for lost profits, loss of
goodwill, work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses, even if such party shall have been
informed of the possibility of such damages. This limitation of liability
shall not apply to liability for death or personal injury resulting from
such party's negligence to the extent applicable law prohibits such
limitation. Some jurisdictions do not allow the exclusion or limitation of
incidental or consequential damages, so this exclusion and limitation may
not apply to You.
8. Litigation
Any litigation relating to this License may be brought only in the courts
of a jurisdiction where the defendant maintains its principal place of
business and such litigation shall be governed by laws of that
jurisdiction, without reference to its conflict-of-law provisions. Nothing
in this Section shall prevent a party's ability to bring cross-claims or
counter-claims.
9. Miscellaneous
This License represents the complete agreement concerning the subject
matter hereof. If any provision of this License is held to be
unenforceable, such provision shall be reformed only to the extent
necessary to make it enforceable. Any law or regulation which provides that
the language of a contract shall be construed against the drafter shall not
be used to construe this License against a Contributor.
10. Versions of the License
10.1. New Versions
Mozilla Foundation is the license steward. Except as provided in Section
10.3, no one other than the license steward has the right to modify or
publish new versions of this License. Each version will be given a
distinguishing version number.
10.2. Effect of New Versions
You may distribute the Covered Software under the terms of the version
of the License under which You originally received the Covered Software,
or under the terms of any subsequent version published by the license
steward.
10.3. Modified Versions
If you create software not governed by this License, and you want to
create a new license for such software, you may create and use a
modified version of this License if you rename the license and remove
any references to the name of the license steward (except to note that
such modified license differs from this License).
10.4. Distributing Source Code Form that is Incompatible With Secondary
Licenses If You choose to distribute Source Code Form that is
Incompatible With Secondary Licenses under the terms of this version of
the License, the notice described in Exhibit B of this License must be
attached.
Exhibit A - Source Code Form License Notice
This Source Code Form is subject to the
terms of the Mozilla Public License, v.
2.0. If a copy of the MPL was not
distributed with this file, You can
obtain one at
http://mozilla.org/MPL/2.0/.
If it is not possible or desirable to put the notice in a particular file,
then You may include the notice in a location (such as a LICENSE file in a
relevant directory) where a recipient would be likely to look for such a
notice.
You may add additional accurate notices of copyright ownership.
Exhibit B - "Incompatible With Secondary Licenses" Notice
This Source Code Form is "Incompatible
With Secondary Licenses", as defined by
the Mozilla Public License, v. 2.0.

View file

@ -1,11 +0,0 @@
default: test
test:
go vet ./...
go test -v -race ./...
updatedeps:
go get -f -t -u ./...
go get -f -u ./...
.PHONY: default test updatedeps

View file

@ -1,62 +0,0 @@
go-retryablehttp
================
[![Build Status](http://img.shields.io/travis/hashicorp/go-retryablehttp.svg?style=flat-square)][travis]
[![Go Documentation](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)][godocs]
[travis]: http://travis-ci.org/hashicorp/go-retryablehttp
[godocs]: http://godoc.org/github.com/hashicorp/go-retryablehttp
The `retryablehttp` package provides a familiar HTTP client interface with
automatic retries and exponential backoff. It is a thin wrapper over the
standard `net/http` client library and exposes nearly the same public API. This
makes `retryablehttp` very easy to drop into existing programs.
`retryablehttp` performs automatic retries under certain conditions. Mainly, if
an error is returned by the client (connection errors, etc.), or if a 500-range
response code is received (except 501), then a retry is invoked after a wait
period. Otherwise, the response is returned and left to the caller to
interpret.
The main difference from `net/http` is that requests which take a request body
(POST/PUT et al.) can have the body provided in a number of ways (some more or
less efficient) that allow "rewinding" the request body if the initial request
fails so that the full request can be attempted again. See the
[godoc](http://godoc.org/github.com/hashicorp/go-retryablehttp) for more
details.
Version 0.6.0 and before are compatible with Go prior to 1.12. From 0.6.1 onward, Go 1.12+ is required.
From 0.6.7 onward, Go 1.13+ is required.
Example Use
===========
Using this library should look almost identical to what you would do with
`net/http`. The most simple example of a GET request is shown below:
```go
resp, err := retryablehttp.Get("/foo")
if err != nil {
panic(err)
}
```
The returned response object is an `*http.Response`, the same thing you would
usually get from `net/http`. Had the request failed one or more times, the above
call would block and retry with exponential backoff.
## Getting a stdlib `*http.Client` with retries
It's possible to convert a `*retryablehttp.Client` directly to a `*http.Client`.
This makes use of retryablehttp broadly applicable with minimal effort. Simply
configure a `*retryablehttp.Client` as you wish, and then call `StandardClient()`:
```go
retryClient := retryablehttp.NewClient()
retryClient.RetryMax = 10
standardClient := retryClient.StandardClient() // *http.Client
```
For more usage and examples see the
[pkg.go.dev](https://pkg.go.dev/github.com/hashicorp/go-retryablehttp).

View file

@ -1,14 +0,0 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0
//go:build !go1.20
// +build !go1.20
package retryablehttp
import "crypto/x509"
func isCertError(err error) bool {
_, ok := err.(x509.UnknownAuthorityError)
return ok
}

View file

@ -1,14 +0,0 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0
//go:build go1.20
// +build go1.20
package retryablehttp
import "crypto/tls"
func isCertError(err error) bool {
_, ok := err.(*tls.CertificateVerificationError)
return ok
}

View file

@ -1,919 +0,0 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0
// Package retryablehttp provides a familiar HTTP client interface with
// automatic retries and exponential backoff. It is a thin wrapper over the
// standard net/http client library and exposes nearly the same public API.
// This makes retryablehttp very easy to drop into existing programs.
//
// retryablehttp performs automatic retries under certain conditions. Mainly, if
// an error is returned by the client (connection errors etc), or if a 500-range
// response is received, then a retry is invoked. Otherwise, the response is
// returned and left to the caller to interpret.
//
// Requests which take a request body should provide a non-nil function
// parameter. The best choice is to provide either a function satisfying
// ReaderFunc which provides multiple io.Readers in an efficient manner, a
// *bytes.Buffer (the underlying raw byte slice will be used) or a raw byte
// slice. As it is a reference type, and we will wrap it as needed by readers,
// we can efficiently re-use the request body without needing to copy it. If an
// io.Reader (such as a *bytes.Reader) is provided, the full body will be read
// prior to the first request, and will be efficiently re-used for any retries.
// ReadSeeker can be used, but some users have observed occasional data races
// between the net/http library and the Seek functionality of some
// implementations of ReadSeeker, so should be avoided if possible.
package retryablehttp
import (
"bytes"
"context"
"fmt"
"io"
"log"
"math"
"math/rand"
"net/http"
"net/url"
"os"
"regexp"
"strconv"
"strings"
"sync"
"time"
cleanhttp "github.com/hashicorp/go-cleanhttp"
)
var (
// Default retry configuration: bounds for the wait between attempts and
// the default cap on the number of retries.
defaultRetryWaitMin = 1 * time.Second
defaultRetryWaitMax = 30 * time.Second
defaultRetryMax = 4
// defaultLogger is the logger provided with defaultClient
defaultLogger = log.New(os.Stderr, "", log.LstdFlags)
// defaultClient is used for performing requests without explicitly making
// a new client. It is purposely private to avoid modifications.
defaultClient = NewClient()
// We need to consume response bodies to maintain http connections, but
// limit the size we consume to respReadLimit (bytes).
respReadLimit = int64(4096)
// timeNow sets the function that returns the current time.
// This defaults to time.Now. Changes to this should only be done in tests.
timeNow = time.Now
// A regular expression to match the error returned by net/http when the
// configured number of redirects is exhausted. This error isn't typed
// specifically so we resort to matching on the error string.
redirectsErrorRe = regexp.MustCompile(`stopped after \d+ redirects\z`)
// A regular expression to match the error returned by net/http when the
// scheme specified in the URL is invalid. This error isn't typed
// specifically so we resort to matching on the error string.
schemeErrorRe = regexp.MustCompile(`unsupported protocol scheme`)
// A regular expression to match the error returned by net/http when a
// request header or value is invalid. This error isn't typed
// specifically so we resort to matching on the error string.
invalidHeaderErrorRe = regexp.MustCompile(`invalid header`)
// A regular expression to match the error returned by net/http when the
// TLS certificate is not trusted. This error isn't typed
// specifically so we resort to matching on the error string.
notTrustedErrorRe = regexp.MustCompile(`certificate is not trusted`)
)
// ReaderFunc is the type of function that can be given natively to NewRequest.
// Each call is expected to yield a fresh reader over the same payload so the
// request body can be replayed across retries.
type ReaderFunc func() (io.Reader, error)
// ResponseHandlerFunc is a type of function that takes in a Response, and does something with it.
// The ResponseHandlerFunc is called when the HTTP client successfully receives a response and the
// CheckRetry function indicates that a retry of the base request is not necessary.
// If an error is returned from this function, the CheckRetry policy will be used to determine
// whether to retry the whole request (including this handler).
//
// Make sure to check status codes! Even if the request was completed it may have a non-2xx status code.
//
// The response body is not automatically closed. It must be closed either by the ResponseHandlerFunc or
// by the caller out-of-band. Failure to do so will result in a memory leak.
type ResponseHandlerFunc func(*http.Response) error
// LenReader is an interface implemented by many in-memory io.Reader's. Used
// for automatically sending the right Content-Length header when possible.
// (bytes.Buffer, bytes.Reader and strings.Reader all satisfy it.)
type LenReader interface {
Len() int
}
// Request wraps the metadata needed to create HTTP requests.
type Request struct {
// body is a seekable reader over the request body payload. This is
// used to rewind the request data in between retries.
body ReaderFunc
// responseHandler, when non-nil, is run on the final response
// (see ResponseHandlerFunc; set via SetResponseHandler).
responseHandler ResponseHandlerFunc
// Embed an HTTP request directly. This makes a *Request act exactly
// like an *http.Request so that all meta methods are supported.
*http.Request
}
// WithContext returns wrapped Request with a shallow copy of underlying *http.Request
// with its context changed to ctx. The provided ctx must be non-nil.
func (r *Request) WithContext(ctx context.Context) *Request {
	clone := new(Request)
	clone.body = r.body
	clone.responseHandler = r.responseHandler
	clone.Request = r.Request.WithContext(ctx)
	return clone
}
// SetResponseHandler allows setting the response handler.
//
// The handler is invoked once a response has been received and the retry
// policy has decided not to retry (see ResponseHandlerFunc); pass nil to
// remove a previously set handler.
func (r *Request) SetResponseHandler(fn ResponseHandlerFunc) {
r.responseHandler = fn
}
// BodyBytes allows accessing the request body. It is an analogue to
// http.Request's Body variable, but it returns a copy of the underlying data
// rather than consuming it.
//
// This function is not thread-safe; do not call it at the same time as another
// call, or at the same time this request is being used with Client.Do.
func (r *Request) BodyBytes() ([]byte, error) {
	if r.body == nil {
		// No body was ever set; report "no data" rather than an error.
		return nil, nil
	}
	rdr, err := r.body()
	if err != nil {
		return nil, err
	}
	var buf bytes.Buffer
	if _, err := buf.ReadFrom(rdr); err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}
// SetBody allows setting the request body.
//
// It is useful if a new body needs to be set without constructing a new Request.
func (r *Request) SetBody(rawBody interface{}) error {
	reader, length, err := getBodyReaderAndContentLength(rawBody)
	if err != nil {
		return err
	}
	r.body = reader
	r.ContentLength = length

	if reader == nil {
		// No payload: expose http.NoBody so callers of GetBody always
		// receive a usable ReadCloser.
		r.GetBody = func() (io.ReadCloser, error) { return http.NoBody, nil }
		return nil
	}

	// GetBody re-materializes the payload via the ReaderFunc, wrapping the
	// result in a no-op closer when it is not already a ReadCloser.
	r.GetBody = func() (io.ReadCloser, error) {
		b, err := reader()
		if err != nil {
			return nil, err
		}
		if rc, ok := b.(io.ReadCloser); ok {
			return rc, nil
		}
		return io.NopCloser(b), nil
	}
	return nil
}
// WriteTo allows copying the request body into a writer.
//
// It writes data to w until there's no more data to write or
// when an error occurs. The return int64 value is the number of bytes
// written. Any error encountered during the write is also returned.
// The signature matches io.WriterTo interface.
func (r *Request) WriteTo(w io.Writer) (int64, error) {
	src, err := r.body()
	if err != nil {
		return 0, err
	}
	// Close the materialized reader, when it supports closing, once the
	// copy is done.
	if closer, ok := src.(io.Closer); ok {
		defer closer.Close()
	}
	return io.Copy(w, src)
}
// getBodyReaderAndContentLength normalizes the many accepted rawBody types
// into a ReaderFunc that yields a fresh reader on every call (so the body can
// be replayed on retry), plus the content length when it can be determined
// (it is left at 0 when unknown or when the body is empty). A nil rawBody
// yields a nil ReaderFunc; an unsupported type yields an error.
//
// NOTE: case order in the type switch matters — e.g. *bytes.Reader must be
// matched before the more general io.ReadSeeker / io.Reader cases.
func getBodyReaderAndContentLength(rawBody interface{}) (ReaderFunc, int64, error) {
var bodyReader ReaderFunc
var contentLength int64
switch body := rawBody.(type) {
// If they gave us a function already, great! Use it.
// The function is invoked once here only to probe the length; the probe
// reader is closed if it supports Close.
case ReaderFunc:
bodyReader = body
tmp, err := body()
if err != nil {
return nil, 0, err
}
if lr, ok := tmp.(LenReader); ok {
contentLength = int64(lr.Len())
}
if c, ok := tmp.(io.Closer); ok {
c.Close()
}
// Identical handling for an untyped function literal: in a type switch a
// plain func() (io.Reader, error) does not match the named ReaderFunc
// case, so it needs its own case.
case func() (io.Reader, error):
bodyReader = body
tmp, err := body()
if err != nil {
return nil, 0, err
}
if lr, ok := tmp.(LenReader); ok {
contentLength = int64(lr.Len())
}
if c, ok := tmp.(io.Closer); ok {
c.Close()
}
// If a regular byte slice, we can read it over and over via new
// readers
case []byte:
buf := body
bodyReader = func() (io.Reader, error) {
return bytes.NewReader(buf), nil
}
contentLength = int64(len(buf))
// If a bytes.Buffer we can read the underlying byte slice over and
// over
case *bytes.Buffer:
buf := body
bodyReader = func() (io.Reader, error) {
return bytes.NewReader(buf.Bytes()), nil
}
contentLength = int64(buf.Len())
// We prioritize *bytes.Reader here because we don't really want to
// deal with it seeking so want it to match here instead of the
// io.ReadSeeker case.
// A value copy ("snapshot") of the reader is taken so each call replays
// from the original position, independent of the caller's reader.
case *bytes.Reader:
snapshot := *body
bodyReader = func() (io.Reader, error) {
r := snapshot
return &r, nil
}
contentLength = int64(body.Len())
// Compat case
case io.ReadSeeker:
raw := body
bodyReader = func() (io.Reader, error) {
_, err := raw.Seek(0, 0)
return io.NopCloser(raw), err
}
if lr, ok := raw.(LenReader); ok {
contentLength = int64(lr.Len())
}
// Read all in so we can reset
case io.Reader:
buf, err := io.ReadAll(body)
if err != nil {
return nil, 0, err
}
if len(buf) == 0 {
bodyReader = func() (io.Reader, error) {
return http.NoBody, nil
}
contentLength = 0
} else {
bodyReader = func() (io.Reader, error) {
return bytes.NewReader(buf), nil
}
contentLength = int64(len(buf))
}
// No body provided, nothing to do
case nil:
// Unrecognized type
default:
return nil, 0, fmt.Errorf("cannot handle type %T", rawBody)
}
return bodyReader, contentLength, nil
}
// FromRequest wraps an http.Request in a retryablehttp.Request.
//
// The body (if any) is converted into a replayable ReaderFunc; depending on
// its concrete type this may read and buffer the entire body up front (see
// getBodyReaderAndContentLength). The computed content length is discarded
// and the ContentLength already present on r is kept.
func FromRequest(r *http.Request) (*Request, error) {
bodyReader, _, err := getBodyReaderAndContentLength(r.Body)
if err != nil {
return nil, err
}
// Could assert contentLength == r.ContentLength
return &Request{body: bodyReader, Request: r}, nil
}
// NewRequest creates a new wrapped request.
//
// It is equivalent to NewRequestWithContext with context.Background().
func NewRequest(method, url string, rawBody interface{}) (*Request, error) {
return NewRequestWithContext(context.Background(), method, url, rawBody)
}
// NewRequestWithContext creates a new wrapped request with the provided context.
//
// The context controls the entire lifetime of a request and its response:
// obtaining a connection, sending the request, and reading the response headers and body.
func NewRequestWithContext(ctx context.Context, method, url string, rawBody interface{}) (*Request, error) {
	// Build the underlying request without a body; the body is attached
	// afterwards in replayable form via SetBody.
	base, err := http.NewRequestWithContext(ctx, method, url, nil)
	if err != nil {
		return nil, err
	}

	wrapped := &Request{Request: base}
	if err := wrapped.SetBody(rawBody); err != nil {
		return nil, err
	}
	return wrapped, nil
}
// Logger interface allows using other loggers than
// standard log.Logger.
type Logger interface {
Printf(string, ...interface{})
}
// LeveledLogger is an interface that can be implemented by any logger or a
// logger wrapper to provide leveled logging. The methods accept a message
// string and a variadic number of key-value pairs. For log.Printf style
// formatting where message string contains a format specifier, use Logger
// interface.
type LeveledLogger interface {
Error(msg string, keysAndValues ...interface{})
Info(msg string, keysAndValues ...interface{})
Debug(msg string, keysAndValues ...interface{})
Warn(msg string, keysAndValues ...interface{})
}
// hookLogger adapts a LeveledLogger to the Logger interface for use by the
// existing hook functions without changing the API.
type hookLogger struct {
LeveledLogger
}
// Printf implements the Logger interface by formatting the message with
// fmt.Sprintf and forwarding it to the wrapped LeveledLogger at Info level
// with no key/value pairs.
func (h hookLogger) Printf(s string, args ...interface{}) {
h.Info(fmt.Sprintf(s, args...))
}
// RequestLogHook allows a function to run before each retry. The HTTP
// request which will be made, and the retry number (0 for the initial
// request) are available to users. The internal logger is exposed to
// consumers.
type RequestLogHook func(Logger, *http.Request, int)
// ResponseLogHook is like RequestLogHook, but allows running a function
// on each HTTP response. This function will be invoked at the end of
// every HTTP request executed, regardless of whether a subsequent retry
// needs to be performed or not. If the response body is read or closed
// from this method, this will affect the response returned from Do().
type ResponseLogHook func(Logger, *http.Response)
// CheckRetry specifies a policy for handling retries. It is called
// following each request with the response and error values returned by
// the http.Client. If CheckRetry returns false, the Client stops retrying
// and returns the response to the caller. If CheckRetry returns an error,
// that error value is returned in lieu of the error from the request. The
// Client will close any response body when retrying, but if the retry is
// aborted it is up to the CheckRetry callback to properly close any
// response body before returning.
type CheckRetry func(ctx context.Context, resp *http.Response, err error) (bool, error)
// Backoff specifies a policy for how long to wait between retries.
// It is called after a failing request to determine the amount of time
// that should pass before trying again.
// (min and max presumably correspond to Client.RetryWaitMin/RetryWaitMax;
// the call site is outside this excerpt.)
type Backoff func(min, max time.Duration, attemptNum int, resp *http.Response) time.Duration
// ErrorHandler is called if retries are expired, containing the last status
// from the http library. If not specified, default behavior for the library is
// to close the body and return an error indicating how many tries were
// attempted. If overriding this, be sure to close the body if needed.
type ErrorHandler func(resp *http.Response, err error, numTries int) (*http.Response, error)
// PrepareRetry is called before a retry operation. It can be used, for
// example, to re-sign the request before it is re-sent.
type PrepareRetry func(req *http.Request) error
// Client is used to make HTTP requests. It adds additional functionality
// like automatic retries to tolerate minor outages.
type Client struct {
HTTPClient *http.Client // Internal HTTP client.
Logger interface{} // Custom logger instance. Can be either Logger or LeveledLogger
RetryWaitMin time.Duration // Minimum time to wait
RetryWaitMax time.Duration // Maximum time to wait
RetryMax int // Maximum number of retries
// RequestLogHook allows a user-supplied function to be called
// before each retry.
RequestLogHook RequestLogHook
// ResponseLogHook allows a user-supplied function to be called
// with the response from each HTTP request executed.
ResponseLogHook ResponseLogHook
// CheckRetry specifies the policy for handling retries, and is called
// after each request. The default policy is DefaultRetryPolicy.
CheckRetry CheckRetry
// Backoff specifies the policy for how long to wait between retries
Backoff Backoff
// ErrorHandler specifies the custom error handler to use, if any
ErrorHandler ErrorHandler
// PrepareRetry can prepare the request for retry operation, for example re-sign it
PrepareRetry PrepareRetry
// loggerInit ensures the Logger type check in logger() runs only once.
loggerInit sync.Once
// clientInit guards a one-time initialization performed elsewhere in
// this file (its use is not visible in this excerpt).
clientInit sync.Once
}
// NewClient creates a new Client with default settings: a pooled HTTP
// client, the package default logger, and the default retry/backoff policy.
func NewClient() *Client {
	client := &Client{
		HTTPClient:   cleanhttp.DefaultPooledClient(),
		Logger:       defaultLogger,
		RetryWaitMin: defaultRetryWaitMin,
		RetryWaitMax: defaultRetryWaitMax,
		RetryMax:     defaultRetryMax,
		CheckRetry:   DefaultRetryPolicy,
		Backoff:      DefaultBackoff,
	}
	return client
}
// logger validates c.Logger exactly once and returns it. A Logger of an
// unsupported type causes a panic, which is intended to surface during
// development rather than in production.
func (c *Client) logger() interface{} {
	c.loggerInit.Do(func() {
		if c.Logger == nil {
			return
		}
		if _, ok := c.Logger.(Logger); ok {
			return
		}
		if _, ok := c.Logger.(LeveledLogger); ok {
			return
		}
		panic(fmt.Sprintf("invalid logger type passed, must be Logger or LeveledLogger, was %T", c.Logger))
	})
	return c.Logger
}
// DefaultRetryPolicy provides a default callback for Client.CheckRetry, which
// will retry on connection errors and server errors.
func DefaultRetryPolicy(ctx context.Context, resp *http.Response, err error) (bool, error) {
	// Never retry once the request's context was canceled or timed out.
	if ctxErr := ctx.Err(); ctxErr != nil {
		return false, ctxErr
	}
	// Only the retry decision is surfaced; the policy's own error is dropped.
	retry, _ := baseRetryPolicy(resp, err)
	return retry, nil
}
// ErrorPropagatedRetryPolicy is the same as DefaultRetryPolicy, except it
// propagates errors back instead of returning nil. This allows you to inspect
// why it decided to retry or not.
func ErrorPropagatedRetryPolicy(ctx context.Context, resp *http.Response, err error) (bool, error) {
	// Never retry once the request's context was canceled or timed out.
	if ctxErr := ctx.Err(); ctxErr != nil {
		return false, ctxErr
	}
	return baseRetryPolicy(resp, err)
}
// baseRetryPolicy decides whether a request should be retried based on the
// transport error (if any) and the HTTP status code.
func baseRetryPolicy(resp *http.Response, err error) (bool, error) {
	if err != nil {
		if urlErr, ok := err.(*url.Error); ok {
			msg := urlErr.Error()
			// Permanent client-side failures: redirect loops, invalid
			// protocol schemes, malformed headers, untrusted TLS certs.
			if redirectsErrorRe.MatchString(msg) ||
				schemeErrorRe.MatchString(msg) ||
				invalidHeaderErrorRe.MatchString(msg) ||
				notTrustedErrorRe.MatchString(msg) {
				return false, urlErr
			}
			if isCertError(urlErr.Err) {
				return false, urlErr
			}
		}
		// Anything else is assumed transient and likely recoverable.
		return true, nil
	}
	// 429 Too Many Requests is recoverable; the server may include a
	// Retry-After header indicating when to try again.
	if resp.StatusCode == http.StatusTooManyRequests {
		return true, nil
	}
	// Retry 500-range responses (except 501 Not Implemented) to allow the
	// server time to recover. This also catches invalid codes like 0 and 999.
	if resp.StatusCode == 0 || (resp.StatusCode >= 500 && resp.StatusCode != http.StatusNotImplemented) {
		return true, fmt.Errorf("unexpected HTTP status %s", resp.Status)
	}
	return false, nil
}
// DefaultBackoff provides a default callback for Client.Backoff which
// will perform exponential backoff based on the attempt number and limited
// by the provided minimum and maximum durations.
//
// It also tries to parse the Retry-After response header when a 429 or 503
// status is found in resp, in which case it returns the delay the server
// requested instead of the computed backoff.
func DefaultBackoff(min, max time.Duration, attemptNum int, resp *http.Response) time.Duration {
	if resp != nil {
		switch resp.StatusCode {
		case http.StatusTooManyRequests, http.StatusServiceUnavailable:
			if sleep, ok := parseRetryAfterHeader(resp.Header["Retry-After"]); ok {
				return sleep
			}
		}
	}
	// min * 2^attemptNum, computed in float64 so overflow is detectable.
	raw := math.Pow(2, float64(attemptNum)) * float64(min)
	wait := time.Duration(raw)
	// Clamp on overflow (round-trip mismatch) or when above the ceiling.
	if float64(wait) != raw || wait > max {
		wait = max
	}
	return wait
}
// parseRetryAfterHeader parses the Retry-After header and returns the
// delay duration according to the spec: https://httpwg.org/specs/rfc7231.html#header.retry-after
// The bool returned will be true if the header was successfully parsed.
// Otherwise, the header was either not present, or was not parseable
// according to the spec.
//
// Retry-After headers come in two flavors: Seconds or HTTP-Date
//
// Examples:
// * Retry-After: Fri, 31 Dec 1999 23:59:59 GMT
// * Retry-After: 120
func parseRetryAfterHeader(headers []string) (time.Duration, bool) {
	if len(headers) == 0 || headers[0] == "" {
		return 0, false
	}
	value := headers[0]
	// Delta-seconds form, e.g. "Retry-After: 120".
	if secs, err := strconv.ParseInt(value, 10, 64); err == nil {
		if secs < 0 {
			// A negative delay is nonsensical; treat as unparseable.
			return 0, false
		}
		return time.Duration(secs) * time.Second, true
	}
	// HTTP-date form, e.g. "Retry-After: Fri, 31 Dec 1999 23:59:59 GMT".
	when, err := time.Parse(time.RFC1123, value)
	if err != nil {
		return 0, false
	}
	delay := when.Sub(timeNow())
	if delay <= 0 {
		// The date is in the past: parseable, but no waiting needed.
		return 0, true
	}
	return delay, true
}
// LinearJitterBackoff provides a callback for Client.Backoff which will
// perform linear backoff based on the attempt number and with jitter to
// prevent a thundering herd.
//
// min and max here are *not* absolute values. The number to be multiplied by
// the attempt number will be chosen at random from between them, thus they are
// bounding the jitter.
//
// For instance:
// * To get strictly linear backoff of one second increasing each retry, set
// both to one second (1s, 2s, 3s, 4s, ...)
// * To get a small amount of jitter centered around one second increasing each
// retry, set to around one second, such as a min of 800ms and max of 1200ms
// (892ms, 2102ms, 2945ms, 4312ms, ...)
// * To get extreme jitter, set to a very wide spread, such as a min of 100ms
// and a max of 20s (15382ms, 292ms, 51321ms, 35234ms, ...)
func LinearJitterBackoff(min, max time.Duration, attemptNum int, resp *http.Response) time.Duration {
// attemptNum always starts at zero but we want to start at 1 for multiplication
attemptNum++
if max <= min {
// Unclear what to do here, or they are the same, so return min *
// attemptNum
return min * time.Duration(attemptNum)
}
// Seed rand; doing this every time is fine
source := rand.New(rand.NewSource(int64(time.Now().Nanosecond())))
// Pick a random number that lies somewhere between the min and max and
// multiply by the attemptNum. attemptNum starts at zero so we always
// increment here. We first get a random percentage, then apply that to the
// difference between min and max, and add to min.
jitter := source.Float64() * float64(max-min)
jitterMin := int64(jitter) + int64(min)
return time.Duration(jitterMin * int64(attemptNum))
}
// PassthroughErrorHandler is an ErrorHandler that directly passes through the
// values from the net/http library for the final request. The response body
// is not closed; the caller is responsible for consuming and closing it.
func PassthroughErrorHandler(resp *http.Response, err error, _ int) (*http.Response, error) {
	return resp, err
}
// Do wraps calling an HTTP method with retries. It repeatedly executes
// req.Request, consulting CheckRetry after each attempt and sleeping per
// Backoff between attempts, until the policy says stop or RetryMax is
// exhausted. On final failure the ErrorHandler (if set) decides what is
// returned; otherwise the body is drained and an error is returned.
func (c *Client) Do(req *Request) (*http.Response, error) {
	// Lazily install the default HTTP client, at most once.
	c.clientInit.Do(func() {
		if c.HTTPClient == nil {
			c.HTTPClient = cleanhttp.DefaultPooledClient()
		}
	})

	logger := c.logger()

	if logger != nil {
		switch v := logger.(type) {
		case LeveledLogger:
			v.Debug("performing request", "method", req.Method, "url", redactURL(req.URL))
		case Logger:
			v.Printf("[DEBUG] %s %s", req.Method, redactURL(req.URL))
		}
	}

	var resp *http.Response
	var attempt int
	var shouldRetry bool
	// Errors from the four failure points are tracked separately so the
	// most specific one can be surfaced after the loop.
	var doErr, respErr, checkErr, prepareErr error

	for i := 0; ; i++ {
		doErr, respErr, prepareErr = nil, nil, nil
		attempt++

		// Always rewind the request body when non-nil, so each retry sends
		// the full payload.
		if req.body != nil {
			body, err := req.body()
			if err != nil {
				c.HTTPClient.CloseIdleConnections()
				return resp, err
			}
			if c, ok := body.(io.ReadCloser); ok {
				req.Body = c
			} else {
				req.Body = io.NopCloser(body)
			}
		}

		if c.RequestLogHook != nil {
			switch v := logger.(type) {
			case LeveledLogger:
				c.RequestLogHook(hookLogger{v}, req.Request, i)
			case Logger:
				c.RequestLogHook(v, req.Request, i)
			default:
				c.RequestLogHook(nil, req.Request, i)
			}
		}

		// Attempt the request.
		resp, doErr = c.HTTPClient.Do(req.Request)

		// Check if we should continue with retries.
		shouldRetry, checkErr = c.CheckRetry(req.Context(), resp, doErr)
		// If the transport succeeded and no retry is needed, give the
		// optional response handler a chance to veto via a second check.
		if !shouldRetry && doErr == nil && req.responseHandler != nil {
			respErr = req.responseHandler(resp)
			shouldRetry, checkErr = c.CheckRetry(req.Context(), resp, respErr)
		}

		err := doErr
		if respErr != nil {
			err = respErr
		}
		if err != nil {
			switch v := logger.(type) {
			case LeveledLogger:
				v.Error("request failed", "error", err, "method", req.Method, "url", redactURL(req.URL))
			case Logger:
				v.Printf("[ERR] %s %s request failed: %v", req.Method, redactURL(req.URL), err)
			}
		} else {
			// Call this here to maintain the behavior of logging all requests,
			// even if CheckRetry signals to stop.
			if c.ResponseLogHook != nil {
				// Call the response logger function if provided.
				switch v := logger.(type) {
				case LeveledLogger:
					c.ResponseLogHook(hookLogger{v}, resp)
				case Logger:
					c.ResponseLogHook(v, resp)
				default:
					c.ResponseLogHook(nil, resp)
				}
			}
		}

		if !shouldRetry {
			break
		}

		// We do this before drainBody because there's no need for the I/O if
		// we're breaking out.
		remain := c.RetryMax - i
		if remain <= 0 {
			break
		}

		// We're going to retry, consume any response to reuse the connection.
		if doErr == nil {
			c.drainBody(resp.Body)
		}

		wait := c.Backoff(c.RetryWaitMin, c.RetryWaitMax, i, resp)
		if logger != nil {
			desc := fmt.Sprintf("%s %s", req.Method, redactURL(req.URL))
			if resp != nil {
				desc = fmt.Sprintf("%s (status: %d)", desc, resp.StatusCode)
			}
			switch v := logger.(type) {
			case LeveledLogger:
				v.Debug("retrying request", "request", desc, "timeout", wait, "remaining", remain)
			case Logger:
				v.Printf("[DEBUG] %s: retrying in %s (%d left)", desc, wait, remain)
			}
		}
		// Sleep for the backoff, but abort immediately if the request's
		// context is canceled while waiting.
		timer := time.NewTimer(wait)
		select {
		case <-req.Context().Done():
			timer.Stop()
			c.HTTPClient.CloseIdleConnections()
			return nil, req.Context().Err()
		case <-timer.C:
		}

		// Make shallow copy of http Request so that we can modify its body
		// without racing against the closeBody call in persistConn.writeLoop.
		httpreq := *req.Request
		req.Request = &httpreq

		if c.PrepareRetry != nil {
			if err := c.PrepareRetry(req.Request); err != nil {
				prepareErr = err
				break
			}
		}
	}

	// This is the closest we have to success criteria.
	if doErr == nil && respErr == nil && checkErr == nil && prepareErr == nil && !shouldRetry {
		return resp, nil
	}

	defer c.HTTPClient.CloseIdleConnections()

	// Pick the most specific failure: prepare > check > response handler >
	// transport.
	var err error
	if prepareErr != nil {
		err = prepareErr
	} else if checkErr != nil {
		err = checkErr
	} else if respErr != nil {
		err = respErr
	} else {
		err = doErr
	}

	if c.ErrorHandler != nil {
		return c.ErrorHandler(resp, err, attempt)
	}

	// By default, we close the response body and return an error without
	// returning the response.
	if resp != nil {
		c.drainBody(resp.Body)
	}

	// This means CheckRetry thought the request was a failure, but didn't
	// communicate why.
	if err == nil {
		return nil, fmt.Errorf("%s %s giving up after %d attempt(s)",
			req.Method, redactURL(req.URL), attempt)
	}

	return nil, fmt.Errorf("%s %s giving up after %d attempt(s): %w",
		req.Method, redactURL(req.URL), attempt, err)
}
// drainBody reads a response body (capped at respReadLimit bytes) and closes
// it, so the underlying connection can be reused for the next attempt.
func (c *Client) drainBody(body io.ReadCloser) {
	defer body.Close()
	if _, err := io.Copy(io.Discard, io.LimitReader(body, respReadLimit)); err != nil {
		logger := c.logger()
		if logger == nil {
			return
		}
		switch v := logger.(type) {
		case LeveledLogger:
			v.Error("error reading response body", "error", err)
		case Logger:
			v.Printf("[ERR] error reading response body: %v", err)
		}
	}
}
// Get is a shortcut for doing a GET request without making a new client;
// it delegates to the package-level default client.
func Get(url string) (*http.Response, error) {
	return defaultClient.Get(url)
}

// Get is a convenience helper for doing simple GET requests with this
// client's retry policy.
func (c *Client) Get(url string) (*http.Response, error) {
	req, err := NewRequest("GET", url, nil)
	if err != nil {
		return nil, err
	}
	return c.Do(req)
}

// Head is a shortcut for doing a HEAD request without making a new client;
// it delegates to the package-level default client.
func Head(url string) (*http.Response, error) {
	return defaultClient.Head(url)
}

// Head is a convenience method for doing simple HEAD requests with this
// client's retry policy.
func (c *Client) Head(url string) (*http.Response, error) {
	req, err := NewRequest("HEAD", url, nil)
	if err != nil {
		return nil, err
	}
	return c.Do(req)
}

// Post is a shortcut for doing a POST request without making a new client;
// it delegates to the package-level default client.
func Post(url, bodyType string, body interface{}) (*http.Response, error) {
	return defaultClient.Post(url, bodyType, body)
}

// Post is a convenience method for doing simple POST requests. bodyType is
// sent as the Content-Type header.
func (c *Client) Post(url, bodyType string, body interface{}) (*http.Response, error) {
	req, err := NewRequest("POST", url, body)
	if err != nil {
		return nil, err
	}
	req.Header.Set("Content-Type", bodyType)
	return c.Do(req)
}

// PostForm is a shortcut to perform a POST with form data without creating
// a new client; it delegates to the package-level default client.
func PostForm(url string, data url.Values) (*http.Response, error) {
	return defaultClient.PostForm(url, data)
}

// PostForm is a convenience method for doing simple POST operations using
// pre-filled url.Values form data, sent URL-encoded.
func (c *Client) PostForm(url string, data url.Values) (*http.Response, error) {
	return c.Post(url, "application/x-www-form-urlencoded", strings.NewReader(data.Encode()))
}
// StandardClient returns a stdlib *http.Client with a custom Transport, which
// shims in a *retryablehttp.Client for added retries.
func (c *Client) StandardClient() *http.Client {
	transport := &RoundTripper{Client: c}
	return &http.Client{Transport: transport}
}
// Taken from url.URL#Redacted() which was introduced in go 1.15.
// We can switch to using it directly if we'll bump the minimum required go version.
func redactURL(u *url.URL) string {
if u == nil {
return ""
}
ru := *u
if _, has := ru.User.Password(); has {
ru.User = url.UserPassword(ru.User.Username(), "xxxxx")
}
return ru.String()
}

View file

@ -1,55 +0,0 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0
package retryablehttp
import (
"errors"
"net/http"
"net/url"
"sync"
)
// RoundTripper implements the http.RoundTripper interface, using a retrying
// HTTP client to execute requests.
//
// It is important to note that retryablehttp doesn't always act exactly as a
// RoundTripper should. This is highly dependent on the retryable client's
// configuration.
type RoundTripper struct {
	// The client to use during requests. If nil, the default retryablehttp
	// client and settings will be used.
	Client *Client

	// once ensures that the logic to initialize the default client runs at
	// most once, in a single thread.
	once sync.Once
}
// init lazily installs the default retryable client when none was provided.
func (rt *RoundTripper) init() {
	if rt.Client != nil {
		return
	}
	rt.Client = NewClient()
}
// RoundTrip satisfies the http.RoundTripper interface by executing the
// request through the retryable client.
func (rt *RoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
	rt.once.Do(rt.init)

	// Wrap the stdlib request so the retryable client can replay its body.
	retryableReq, convErr := FromRequest(req)
	if convErr != nil {
		return nil, convErr
	}

	// Execute the request.
	resp, doErr := rt.Client.Do(retryableReq)

	// If the standard library's Do returned a *url.Error, unwrap it so we
	// don't wind up erroneously re-nesting the error.
	if _, nested := doErr.(*url.Error); nested {
		return resp, errors.Unwrap(doErr)
	}
	return resp, doErr
}

View file

@ -0,0 +1,4 @@
.vscode
.idea
*.swp
jv

View file

@ -0,0 +1,215 @@
# jsonschema v5.1.1
[![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://opensource.org/licenses/Apache-2.0)
[![GoDoc](https://godoc.org/github.com/santhosh-tekuri/jsonschema?status.svg)](https://pkg.go.dev/github.com/santhosh-tekuri/jsonschema/v5)
[![Go Report Card](https://goreportcard.com/badge/github.com/santhosh-tekuri/jsonschema/v5)](https://goreportcard.com/report/github.com/santhosh-tekuri/jsonschema/v5)
[![Build Status](https://github.com/santhosh-tekuri/jsonschema/actions/workflows/go.yaml/badge.svg?branch=master)](https://github.com/santhosh-tekuri/jsonschema/actions/workflows/go.yaml)
[![codecov.io](https://codecov.io/github/santhosh-tekuri/jsonschema/coverage.svg?branch=master)](https://codecov.io/github/santhosh-tekuri/jsonschema?branch=master)
Package jsonschema provides json-schema compilation and validation.
[Benchmarks](https://dev.to/vearutop/benchmarking-correctness-and-performance-of-go-json-schema-validators-3247)
### Features:
- implements
[draft 2020-12](https://json-schema.org/specification-links.html#2020-12),
[draft 2019-09](https://json-schema.org/specification-links.html#draft-2019-09-formerly-known-as-draft-8),
[draft-7](https://json-schema.org/specification-links.html#draft-7),
[draft-6](https://json-schema.org/specification-links.html#draft-6),
[draft-4](https://json-schema.org/specification-links.html#draft-4)
- fully compliant with [JSON-Schema-Test-Suite](https://github.com/json-schema-org/JSON-Schema-Test-Suite), (excluding some optional)
  - list of optional tests that are excluded can be found in schema_test.go (variable [skipTests](https://github.com/santhosh-tekuri/jsonschema/blob/master/schema_test.go#L24))
- validates schemas against meta-schema
- full support of remote references
- support of recursive references between schemas
- detects infinite loop in schemas
- thread safe validation
- rich, intuitive hierarchical error messages with json-pointers to exact location
- supports output formats flag, basic and detailed
- supports enabling format and content Assertions in draft2019-09 or above
- change `Compiler.AssertFormat`, `Compiler.AssertContent` to `true`
- compiled schema can be introspected. easier to develop tools like generating go structs given schema
- supports user-defined keywords via [extensions](https://pkg.go.dev/github.com/santhosh-tekuri/jsonschema/v5/#example-package-Extension)
- implements following formats (supports [user-defined](https://pkg.go.dev/github.com/santhosh-tekuri/jsonschema/v5/#example-package-UserDefinedFormat))
- date-time, date, time, duration, period (supports leap-second)
- uuid, hostname, email
- ip-address, ipv4, ipv6
- uri, uriref, uri-template(limited validation)
- json-pointer, relative-json-pointer
- regex, format
- implements following contentEncoding (supports [user-defined](https://pkg.go.dev/github.com/santhosh-tekuri/jsonschema/v5/#example-package-UserDefinedContent))
- base64
- implements following contentMediaType (supports [user-defined](https://pkg.go.dev/github.com/santhosh-tekuri/jsonschema/v5/#example-package-UserDefinedContent))
- application/json
- can load from files/http/https/[string](https://pkg.go.dev/github.com/santhosh-tekuri/jsonschema/v5/#example-package-FromString)/[]byte/io.Reader (supports [user-defined](https://pkg.go.dev/github.com/santhosh-tekuri/jsonschema/v5/#example-package-UserDefinedLoader))
see examples in [godoc](https://pkg.go.dev/github.com/santhosh-tekuri/jsonschema/v5)
The schema is compiled against the version specified in `$schema` property.
If "$schema" property is missing, it uses the latest draft currently implemented
by this library.
You can force to use specific version, when `$schema` is missing, as follows:
```go
compiler := jsonschema.NewCompiler()
compiler.Draft = jsonschema.Draft4
```
This package supports loading json-schema from filePath and fileURL.
To load json-schema from HTTPURL, add following import:
```go
import _ "github.com/santhosh-tekuri/jsonschema/v5/httploader"
```
## Rich Errors
The ValidationError returned by Validate method contains detailed context to understand why and where the error is.
schema.json:
```json
{
"$ref": "t.json#/definitions/employee"
}
```
t.json:
```json
{
"definitions": {
"employee": {
"type": "string"
}
}
}
```
doc.json:
```json
1
```
assuming `err` is the ValidationError returned when `doc.json` validated with `schema.json`,
```go
fmt.Printf("%#v\n", err) // using %#v prints errors hierarchy
```
Prints:
```
[I#] [S#] doesn't validate with file:///Users/santhosh/jsonschema/schema.json#
[I#] [S#/$ref] doesn't validate with 'file:///Users/santhosh/jsonschema/t.json#/definitions/employee'
[I#] [S#/definitions/employee/type] expected string, but got number
```
Here `I` stands for instance document and `S` stands for schema document.
The json-fragments that caused error in instance and schema documents are represented using json-pointer notation.
Nested causes are printed with indent.
To output `err` in `flag` output format:
```go
b, _ := json.MarshalIndent(err.FlagOutput(), "", " ")
fmt.Println(string(b))
```
Prints:
```json
{
"valid": false
}
```
To output `err` in `basic` output format:
```go
b, _ := json.MarshalIndent(err.BasicOutput(), "", " ")
fmt.Println(string(b))
```
Prints:
```json
{
"valid": false,
"errors": [
{
"keywordLocation": "",
"absoluteKeywordLocation": "file:///Users/santhosh/jsonschema/schema.json#",
"instanceLocation": "",
"error": "doesn't validate with file:///Users/santhosh/jsonschema/schema.json#"
},
{
"keywordLocation": "/$ref",
"absoluteKeywordLocation": "file:///Users/santhosh/jsonschema/schema.json#/$ref",
"instanceLocation": "",
"error": "doesn't validate with 'file:///Users/santhosh/jsonschema/t.json#/definitions/employee'"
},
{
"keywordLocation": "/$ref/type",
"absoluteKeywordLocation": "file:///Users/santhosh/jsonschema/t.json#/definitions/employee/type",
"instanceLocation": "",
"error": "expected string, but got number"
}
]
}
```
To output `err` in `detailed` output format:
```go
b, _ := json.MarshalIndent(err.DetailedOutput(), "", " ")
fmt.Println(string(b))
```
Prints:
```json
{
"valid": false,
"keywordLocation": "",
"absoluteKeywordLocation": "file:///Users/santhosh/jsonschema/schema.json#",
"instanceLocation": "",
"errors": [
{
"valid": false,
"keywordLocation": "/$ref",
"absoluteKeywordLocation": "file:///Users/santhosh/jsonschema/schema.json#/$ref",
"instanceLocation": "",
"errors": [
{
"valid": false,
"keywordLocation": "/$ref/type",
"absoluteKeywordLocation": "file:///Users/santhosh/jsonschema/t.json#/definitions/employee/type",
"instanceLocation": "",
"error": "expected string, but got number"
}
]
}
]
}
```
## CLI
to install `go install github.com/santhosh-tekuri/jsonschema/v5/cmd/jv@latest`
```bash
jv [-draft INT] [-output FORMAT] [-assertformat] [-assertcontent] <json-schema> [<json-doc>]...
-assertcontent
enable content assertions with draft >= 2019
-assertformat
enable format assertions with draft >= 2019
-draft int
draft used when '$schema' attribute is missing. valid values 4, 5, 7, 2019, 2020 (default 2020)
-output string
output format. valid values flag, basic, detailed
```
if no `<json-doc>` arguments are passed, it simply validates the `<json-schema>`.
if `$schema` attribute is missing in schema, it uses latest version. this can be overridden by passing `-draft` flag
exit-code is 1, if there are any validation errors
## Validating YAML Documents
since yaml supports non-string keys, such yaml documents are rendered as invalid json documents.
most yaml parsers return `map[interface{}]interface{}` for objects, whereas json parsers return `map[string]interface{}`.
this package accepts only `map[string]interface{}`, so we need to manually convert the yaml parser's output to `map[string]interface{}`
https://play.golang.org/p/Hhax3MrtD8r
the above example shows how to validate yaml document with jsonschema.
the conversion explained above is implemented by `toStringKeys` function

View file

@ -0,0 +1,771 @@
package jsonschema
import (
"encoding/json"
"fmt"
"io"
"math/big"
"regexp"
"strconv"
"strings"
)
// A Compiler represents a json-schema compiler.
type Compiler struct {
	// Draft represents the draft used when '$schema' attribute is missing.
	//
	// This defaults to latest supported draft (currently 2020-12).
	Draft *Draft

	// resources caches loaded resources by their (fragment-free) URL.
	resources map[string]*resource

	// extensions holds user-registered extensions, keyed by name.
	extensions map[string]extension

	// ExtractAnnotations tells whether schema annotations have to be extracted
	// in compiled Schema or not.
	ExtractAnnotations bool

	// LoadURL loads the document at given absolute URL.
	//
	// If nil, package global LoadURL is used.
	LoadURL func(s string) (io.ReadCloser, error)

	// AssertFormat enables format assertions for specifications >= draft2019-09.
	AssertFormat bool

	// AssertContent enables content assertions for specifications >= draft2019-09.
	AssertContent bool
}
// Compile parses the json-schema at the given url and returns, if successful,
// a Schema object that can be used to match against json.
//
// Returned error can be *SchemaError
func Compile(url string) (*Schema, error) {
	compiler := NewCompiler()
	return compiler.Compile(url)
}
// MustCompile is like Compile but panics if the url cannot be compiled to *Schema.
// It simplifies safe initialization of global variables holding compiled Schemas.
func MustCompile(url string) *Schema {
	compiler := NewCompiler()
	return compiler.MustCompile(url)
}
// CompileString parses and compiles the given schema text, registered under
// the given base url.
func CompileString(url, schema string) (*Schema, error) {
	compiler := NewCompiler()
	if err := compiler.AddResource(url, strings.NewReader(schema)); err != nil {
		return nil, err
	}
	return compiler.Compile(url)
}
// MustCompileString is like CompileString but panics on error.
// It simplifies safe initialization of global variables holding compiled Schema.
func MustCompileString(url, schema string) *Schema {
	c := NewCompiler()
	if err := c.AddResource(url, strings.NewReader(schema)); err != nil {
		panic(err)
	}
	return c.MustCompile(url)
}
// NewCompiler returns a json-schema Compiler object.
// If '$schema' attribute is missing, the latest supported draft is used.
// To change this behavior, set Compiler.Draft.
func NewCompiler() *Compiler {
	return &Compiler{Draft: latest, resources: make(map[string]*resource), extensions: make(map[string]extension)}
}
// AddResource adds an in-memory resource to the compiler.
//
// Note that url must not have fragment
func (c *Compiler) AddResource(url string, r io.Reader) error {
	res, err := newResource(url, r)
	if err == nil {
		// Index under the resource's normalized URL, not the raw argument.
		c.resources[res.url] = res
	}
	return err
}
// MustCompile is like Compile but panics if the url cannot be compiled to *Schema.
// It simplifies safe initialization of global variables holding compiled Schemas.
func (c *Compiler) MustCompile(url string) *Schema {
	sch, err := c.Compile(url)
	if err == nil {
		return sch
	}
	panic(fmt.Sprintf("jsonschema: %#v", err))
}
// Compile parses the json-schema at the given url and returns, if successful,
// a Schema object that can be used to match against json.
//
// error returned will be of type *SchemaError
func (c *Compiler) Compile(url string) (*Schema, error) {
	// Resolve the url to an absolute form before lookup.
	abs, err := toAbs(url)
	if err != nil {
		return nil, &SchemaError{url, err}
	}
	sch, err := c.compileURL(abs, nil, "#")
	if err != nil {
		return sch, &SchemaError{abs, err}
	}
	return sch, nil
}
// findResource returns the resource for url, loading and caching it on first
// use, and resolves the draft the resource is written against (from its
// "$schema" attribute, falling back to c.Draft). It also resolves the
// resource's "$id" and fills in its subschema index.
func (c *Compiler) findResource(url string) (*resource, error) {
	if _, ok := c.resources[url]; !ok {
		// load resource
		var rdr io.Reader
		if sch, ok := vocabSchemas[url]; ok {
			// Built-in vocabulary meta-schemas are served from memory.
			rdr = strings.NewReader(sch)
		} else {
			// Prefer the compiler's loader; fall back to the package global.
			loadURL := LoadURL
			if c.LoadURL != nil {
				loadURL = c.LoadURL
			}
			r, err := loadURL(url)
			if err != nil {
				return nil, err
			}
			defer r.Close()
			rdr = r
		}
		if err := c.AddResource(url, rdr); err != nil {
			return nil, err
		}
	}

	r := c.resources[url]
	if r.draft != nil {
		// Already fully initialized on a previous call.
		return r, nil
	}

	// set draft
	r.draft = c.Draft
	if m, ok := r.doc.(map[string]interface{}); ok {
		if sch, ok := m["$schema"]; ok {
			sch, ok := sch.(string)
			if !ok {
				return nil, fmt.Errorf("jsonschema: invalid $schema in %s", url)
			}
			if !isURI(sch) {
				return nil, fmt.Errorf("jsonschema: $schema must be uri in %s", url)
			}
			r.draft = findDraft(sch)
			if r.draft == nil {
				// Unknown meta-schema: inherit the draft from the resource
				// the $schema points at (recursively). Guard against a
				// resource naming itself, which would recurse forever.
				sch, _ := split(sch)
				if sch == url {
					return nil, fmt.Errorf("jsonschema: unsupported draft in %s", url)
				}
				mr, err := c.findResource(sch)
				if err != nil {
					return nil, err
				}
				r.draft = mr.draft
			}
		}
	}

	// Resolve "$id" (draft-dependent) and adopt it as the canonical URL.
	id, err := r.draft.resolveID(r.url, r.doc)
	if err != nil {
		return nil, err
	}
	if id != "" {
		r.url = id
	}

	if err := r.fillSubschemas(c, r); err != nil {
		return nil, err
	}

	return r, nil
}
// compileURL compiles the schema at the given absolute url; stack and ptr
// carry the reference chain used for cycle detection and error reporting.
func (c *Compiler) compileURL(url string, stack []schemaRef, ptr string) (*Schema, error) {
	// URLs of known drafts resolve directly to the built-in meta-schema.
	if d := findDraft(url); d != nil && d.meta != nil {
		return d.meta, nil
	}

	base, frag := split(url)
	res, err := c.findResource(base)
	if err != nil {
		return nil, err
	}
	return c.compileRef(res, stack, ptr, res, frag)
}
// compileRef resolves ref (relative to res within root resource r) and
// compiles the schema it points to, reusing an already-compiled schema when
// available. refPtr names the referring keyword for cycle reporting.
func (c *Compiler) compileRef(r *resource, stack []schemaRef, refPtr string, res *resource, ref string) (*Schema, error) {
	// Resolve ref against the base URL in effect at res's location.
	base := r.baseURL(res.floc)
	ref, err := resolveURL(base, ref)
	if err != nil {
		return nil, err
	}

	u, f := split(ref)
	sr := r.findResource(u)
	if sr == nil {
		// external resource
		return c.compileURL(ref, stack, refPtr)
	}

	// ensure root resource is always compiled first.
	// this is required to get schema.meta from root resource
	if r.schema == nil {
		r.schema = newSchema(r.url, r.floc, r.doc)
		if _, err := c.compile(r, nil, schemaRef{"#", r.schema, false}, r); err != nil {
			return nil, err
		}
	}

	// Narrow to the sub-resource addressed by the fragment.
	sr, err = r.resolveFragment(c, sr, f)
	if err != nil {
		return nil, err
	}
	if sr == nil {
		return nil, fmt.Errorf("jsonschema: %s not found", ref)
	}

	if sr.schema != nil {
		// Already compiled: only verify the reference doesn't close a loop.
		if err := checkLoop(stack, schemaRef{refPtr, sr.schema, false}); err != nil {
			return nil, err
		}
		return sr.schema, nil
	}

	sr.schema = newSchema(r.url, sr.floc, sr.doc)
	return c.compile(r, stack, schemaRef{refPtr, sr.schema, false}, sr)
}
// compileDynamicAnchors compiles every "$dynamicAnchor" found in res (and
// its nested resources) and records the results on res.schema.
func (c *Compiler) compileDynamicAnchors(r *resource, res *resource) error {
	// "$dynamicAnchor" only exists from draft 2020-12 onwards.
	if r.draft.version < 2020 {
		return nil
	}

	for _, sr := range append(r.listResources(res), res) {
		m, isMap := sr.doc.(map[string]interface{})
		if !isMap {
			continue
		}
		if _, found := m["$dynamicAnchor"]; !found {
			continue
		}
		sch, err := c.compileRef(r, nil, "IGNORED", r, sr.floc)
		if err != nil {
			return err
		}
		res.schema.dynamicAnchors = append(res.schema.dynamicAnchors, sch)
	}
	return nil
}
// compile populates res.schema from res.doc: a boolean document becomes an
// always/never schema, otherwise the mapping keywords are compiled.
func (c *Compiler) compile(r *resource, stack []schemaRef, sref schemaRef, res *resource) (*Schema, error) {
	if err := c.compileDynamicAnchors(r, res); err != nil {
		return nil, err
	}

	// A boolean schema ("true"/"false") matches everything or nothing.
	if b, ok := res.doc.(bool); ok {
		res.schema.Always = &b
		return res.schema, nil
	}
	return res.schema, c.compileMap(r, stack, sref, res)
}
// compileMap compiles a schema object (a JSON map) into res.schema. It is the
// workhorse of compilation: it walks every keyword recognized by the
// resource's draft — gated by draft version and, from 2019-09 on, by the
// meta-schema's declared vocabularies — and fills the corresponding field of
// the compiled *Schema. Returns an error on invalid refs, unsupported
// vocabularies, or a reference loop.
func (c *Compiler) compileMap(r *resource, stack []schemaRef, sref schemaRef, res *resource) error {
	m := res.doc.(map[string]interface{})

	// loop detection: the same schema must not appear twice on the stack
	if err := checkLoop(stack, sref); err != nil {
		return err
	}
	stack = append(stack, sref)

	var s = res.schema
	var err error

	if r == res { // root schema
		if sch, ok := m["$schema"]; ok {
			sch := sch.(string)
			if d := findDraft(sch); d != nil {
				s.meta = d.meta
			} else {
				// not a known draft url: compile the referenced meta-schema
				if s.meta, err = c.compileRef(r, stack, "$schema", res, sch); err != nil {
					return err
				}
			}
		}
	}

	if ref, ok := m["$ref"]; ok {
		s.Ref, err = c.compileRef(r, stack, "$ref", res, ref.(string))
		if err != nil {
			return err
		}
		if r.draft.version < 2019 {
			// All other properties in a "$ref" object MUST be ignored
			return nil
		}
	}

	if r.draft.version >= 2019 {
		if r == res { // root schema
			if vocab, ok := m["$vocabulary"]; ok {
				for url := range vocab.(map[string]interface{}) {
					if !r.draft.isVocab(url) {
						return fmt.Errorf("jsonschema: unsupported vocab %q in %s", url, res)
					}
					s.vocab = append(s.vocab, url)
				}
			} else {
				s.vocab = r.draft.defaultVocab
			}
		}

		if ref, ok := m["$recursiveRef"]; ok {
			s.RecursiveRef, err = c.compileRef(r, stack, "$recursiveRef", res, ref.(string))
			if err != nil {
				return err
			}
		}
	}
	if r.draft.version >= 2020 {
		if dref, ok := m["$dynamicRef"]; ok {
			s.DynamicRef, err = c.compileRef(r, stack, "$dynamicRef", res, dref.(string))
			if err != nil {
				return err
			}
		}
	}

	// helpers to read numeric keywords; -1 / nil mean "keyword absent"
	loadInt := func(pname string) int {
		if num, ok := m[pname]; ok {
			i, _ := num.(json.Number).Float64()
			return int(i)
		}
		return -1
	}

	loadRat := func(pname string) *big.Rat {
		if num, ok := m[pname]; ok {
			r, _ := new(big.Rat).SetString(string(num.(json.Number)))
			return r
		}
		return nil
	}

	// "validation" vocabulary keywords
	if r.draft.version < 2019 || r.schema.meta.hasVocab("validation") {
		if t, ok := m["type"]; ok {
			switch t := t.(type) {
			case string:
				s.Types = []string{t}
			case []interface{}:
				s.Types = toStrings(t)
			}
		}

		if e, ok := m["enum"]; ok {
			s.Enum = e.([]interface{})
			allPrimitives := true
			for _, item := range s.Enum {
				switch jsonType(item) {
				case "object", "array":
					allPrimitives = false
					break // note: exits the switch only; the loop keeps scanning
				}
			}
			// friendlier error message when the enum is all primitives
			s.enumError = "enum failed"
			if allPrimitives {
				if len(s.Enum) == 1 {
					s.enumError = fmt.Sprintf("value must be %#v", s.Enum[0])
				} else {
					strEnum := make([]string, len(s.Enum))
					for i, item := range s.Enum {
						strEnum[i] = fmt.Sprintf("%#v", item)
					}
					s.enumError = fmt.Sprintf("value must be one of %s", strings.Join(strEnum, ", "))
				}
			}
		}

		s.Minimum = loadRat("minimum")
		if exclusive, ok := m["exclusiveMinimum"]; ok {
			if exclusive, ok := exclusive.(bool); ok {
				// draft-4 style: boolean flag turning "minimum" exclusive
				if exclusive {
					s.Minimum, s.ExclusiveMinimum = nil, s.Minimum
				}
			} else {
				s.ExclusiveMinimum = loadRat("exclusiveMinimum")
			}
		}

		s.Maximum = loadRat("maximum")
		if exclusive, ok := m["exclusiveMaximum"]; ok {
			if exclusive, ok := exclusive.(bool); ok {
				// draft-4 style: boolean flag turning "maximum" exclusive
				if exclusive {
					s.Maximum, s.ExclusiveMaximum = nil, s.Maximum
				}
			} else {
				s.ExclusiveMaximum = loadRat("exclusiveMaximum")
			}
		}

		s.MultipleOf = loadRat("multipleOf")

		s.MinProperties, s.MaxProperties = loadInt("minProperties"), loadInt("maxProperties")

		if req, ok := m["required"]; ok {
			s.Required = toStrings(req.([]interface{}))
		}

		s.MinItems, s.MaxItems = loadInt("minItems"), loadInt("maxItems")

		if unique, ok := m["uniqueItems"]; ok {
			s.UniqueItems = unique.(bool)
		}

		s.MinLength, s.MaxLength = loadInt("minLength"), loadInt("maxLength")

		if pattern, ok := m["pattern"]; ok {
			s.Pattern = regexp.MustCompile(pattern.(string))
		}

		if r.draft.version >= 2019 {
			s.MinContains, s.MaxContains = loadInt("minContains"), loadInt("maxContains")
			if s.MinContains == -1 {
				s.MinContains = 1 // spec default for minContains
			}

			if deps, ok := m["dependentRequired"]; ok {
				deps := deps.(map[string]interface{})
				s.DependentRequired = make(map[string][]string, len(deps))
				for pname, pvalue := range deps {
					s.DependentRequired[pname] = toStrings(pvalue.([]interface{}))
				}
			}
		}
	}

	// helpers to compile subschemas located at res.floc + "/" + ptr
	compile := func(stack []schemaRef, ptr string) (*Schema, error) {
		return c.compileRef(r, stack, ptr, res, r.url+res.floc+"/"+ptr)
	}

	loadSchema := func(pname string, stack []schemaRef) (*Schema, error) {
		if _, ok := m[pname]; ok {
			return compile(stack, escape(pname))
		}
		return nil, nil
	}

	loadSchemas := func(pname string, stack []schemaRef) ([]*Schema, error) {
		if pvalue, ok := m[pname]; ok {
			pvalue := pvalue.([]interface{})
			schemas := make([]*Schema, len(pvalue))
			for i := range pvalue {
				sch, err := compile(stack, escape(pname)+"/"+strconv.Itoa(i))
				if err != nil {
					return nil, err
				}
				schemas[i] = sch
			}
			return schemas, nil
		}
		return nil, nil
	}

	// "applicator" vocabulary keywords
	if r.draft.version < 2019 || r.schema.meta.hasVocab("applicator") {
		if s.Not, err = loadSchema("not", stack); err != nil {
			return err
		}
		if s.AllOf, err = loadSchemas("allOf", stack); err != nil {
			return err
		}
		if s.AnyOf, err = loadSchemas("anyOf", stack); err != nil {
			return err
		}
		if s.OneOf, err = loadSchemas("oneOf", stack); err != nil {
			return err
		}

		if props, ok := m["properties"]; ok {
			props := props.(map[string]interface{})
			s.Properties = make(map[string]*Schema, len(props))
			for pname := range props {
				s.Properties[pname], err = compile(nil, "properties/"+escape(pname))
				if err != nil {
					return err
				}
			}
		}

		if regexProps, ok := m["regexProperties"]; ok {
			s.RegexProperties = regexProps.(bool)
		}

		if patternProps, ok := m["patternProperties"]; ok {
			patternProps := patternProps.(map[string]interface{})
			s.PatternProperties = make(map[*regexp.Regexp]*Schema, len(patternProps))
			for pattern := range patternProps {
				s.PatternProperties[regexp.MustCompile(pattern)], err = compile(nil, "patternProperties/"+escape(pattern))
				if err != nil {
					return err
				}
			}
		}

		if additionalProps, ok := m["additionalProperties"]; ok {
			// either a boolean switch or a full subschema
			switch additionalProps := additionalProps.(type) {
			case bool:
				s.AdditionalProperties = additionalProps
			case map[string]interface{}:
				s.AdditionalProperties, err = compile(nil, "additionalProperties")
				if err != nil {
					return err
				}
			}
		}

		if deps, ok := m["dependencies"]; ok {
			deps := deps.(map[string]interface{})
			s.Dependencies = make(map[string]interface{}, len(deps))
			for pname, pvalue := range deps {
				// each entry is either a list of required names or a subschema
				switch pvalue := pvalue.(type) {
				case []interface{}:
					s.Dependencies[pname] = toStrings(pvalue)
				default:
					s.Dependencies[pname], err = compile(stack, "dependencies/"+escape(pname))
					if err != nil {
						return err
					}
				}
			}
		}

		if r.draft.version >= 6 {
			if s.PropertyNames, err = loadSchema("propertyNames", nil); err != nil {
				return err
			}
			if s.Contains, err = loadSchema("contains", nil); err != nil {
				return err
			}
		}

		if r.draft.version >= 7 {
			// then/else are only meaningful when "if" is present
			if m["if"] != nil {
				if s.If, err = loadSchema("if", stack); err != nil {
					return err
				}
				if s.Then, err = loadSchema("then", stack); err != nil {
					return err
				}
				if s.Else, err = loadSchema("else", stack); err != nil {
					return err
				}
			}
		}
		if r.draft.version >= 2019 {
			if deps, ok := m["dependentSchemas"]; ok {
				deps := deps.(map[string]interface{})
				s.DependentSchemas = make(map[string]*Schema, len(deps))
				for pname := range deps {
					s.DependentSchemas[pname], err = compile(stack, "dependentSchemas/"+escape(pname))
					if err != nil {
						return err
					}
				}
			}
		}

		if r.draft.version >= 2020 {
			// 2020-12: tuple validation moved to prefixItems; items is a single schema
			if s.PrefixItems, err = loadSchemas("prefixItems", nil); err != nil {
				return err
			}
			if s.Items2020, err = loadSchema("items", nil); err != nil {
				return err
			}
		} else {
			if items, ok := m["items"]; ok {
				switch items.(type) {
				case []interface{}:
					// tuple form: additionalItems applies past the tuple
					s.Items, err = loadSchemas("items", nil)
					if err != nil {
						return err
					}
					if additionalItems, ok := m["additionalItems"]; ok {
						switch additionalItems := additionalItems.(type) {
						case bool:
							s.AdditionalItems = additionalItems
						case map[string]interface{}:
							s.AdditionalItems, err = compile(nil, "additionalItems")
							if err != nil {
								return err
							}
						}
					}
				default:
					s.Items, err = compile(nil, "items")
					if err != nil {
						return err
					}
				}
			}
		}
	}

	// unevaluatedXXX keywords were in "applicator" vocab in 2019, but moved to new vocab "unevaluated" in 2020
	if (r.draft.version == 2019 && r.schema.meta.hasVocab("applicator")) || (r.draft.version >= 2020 && r.schema.meta.hasVocab("unevaluated")) {
		if s.UnevaluatedProperties, err = loadSchema("unevaluatedProperties", nil); err != nil {
			return err
		}
		if s.UnevaluatedItems, err = loadSchema("unevaluatedItems", nil); err != nil {
			return err
		}
		if r.draft.version >= 2020 {
			// any item in an array that passes validation of the contains schema is considered "evaluated"
			s.ContainsEval = true
		}
	}

	if format, ok := m["format"]; ok {
		s.Format = format.(string)
		// before 2019-09 format always asserts; afterwards only on opt-in
		if r.draft.version < 2019 || c.AssertFormat || r.schema.meta.hasVocab("format-assertion") {
			s.format, _ = Formats[s.Format]
		}
	}

	if c.ExtractAnnotations {
		if title, ok := m["title"]; ok {
			s.Title = title.(string)
		}
		if description, ok := m["description"]; ok {
			s.Description = description.(string)
		}
		s.Default = m["default"]
	}

	if r.draft.version >= 6 {
		if c, ok := m["const"]; ok {
			s.Constant = []interface{}{c}
		}
	}

	if r.draft.version >= 7 {
		if encoding, ok := m["contentEncoding"]; ok {
			s.ContentEncoding = encoding.(string)
			s.decoder, _ = Decoders[s.ContentEncoding]
		}
		if mediaType, ok := m["contentMediaType"]; ok {
			s.ContentMediaType = mediaType.(string)
			s.mediaType, _ = MediaTypes[s.ContentMediaType]
			if s.ContentSchema, err = loadSchema("contentSchema", stack); err != nil {
				return err
			}
		}
		if c.ExtractAnnotations {
			if comment, ok := m["$comment"]; ok {
				s.Comment = comment.(string)
			}
			if readOnly, ok := m["readOnly"]; ok {
				s.ReadOnly = readOnly.(bool)
			}
			if writeOnly, ok := m["writeOnly"]; ok {
				s.WriteOnly = writeOnly.(bool)
			}
			if examples, ok := m["examples"]; ok {
				s.Examples = examples.([]interface{})
			}
		}
	}

	if r.draft.version >= 2019 {
		if !c.AssertContent {
			// content keywords are annotation-only unless asserting is enabled
			s.decoder = nil
			s.mediaType = nil
			s.ContentSchema = nil
		}
		if c.ExtractAnnotations {
			if deprecated, ok := m["deprecated"]; ok {
				s.Deprecated = deprecated.(bool)
			}
		}
	}

	// finally, let registered extensions compile their own keywords
	for name, ext := range c.extensions {
		es, err := ext.compiler.Compile(CompilerContext{c, r, stack, res}, m)
		if err != nil {
			return err
		}
		if es != nil {
			if s.Extensions == nil {
				s.Extensions = make(map[string]ExtSchema)
			}
			s.Extensions[name] = es
		}
	}

	return nil
}
// validateSchema checks the raw schema value v (located at vloc) against the
// draft's meta-schema and the meta-schema of every registered extension.
// A nil meta-schema is treated as "no constraint".
func (c *Compiler) validateSchema(r *resource, v interface{}, vloc string) error {
	metas := []*Schema{r.draft.meta}
	for _, ext := range c.extensions {
		metas = append(metas, ext.meta)
	}
	for _, meta := range metas {
		if meta == nil {
			continue
		}
		if err := meta.validateValue(v, vloc); err != nil {
			return err
		}
	}
	return nil
}
// toStrings converts a []interface{} whose elements are all strings into a
// []string. It panics (failed type assertion) if any element is not a string.
func toStrings(arr []interface{}) []string {
	out := make([]string, 0, len(arr))
	for _, item := range arr {
		out = append(out, item.(string))
	}
	return out
}
// schemaRef captures a schema and the relative-json-pointer path that was
// followed to reach it. A stack of schemaRefs is threaded through compilation
// and validation to detect infinite loops and to build keyword locations.
type schemaRef struct {
	path    string  // relative-json-pointer to schema
	schema  *Schema // target schema
	discard bool    // true when scope left
}
// String renders the ref as "(path)schema" for debugging output.
func (sr schemaRef) String() string {
	return "(" + sr.path + ")" + fmt.Sprintf("%v", sr.schema)
}
// checkLoop reports an InfiniteLoopError if the schema in sref is already
// present anywhere on the stack (pointer identity, not structural equality).
func checkLoop(stack []schemaRef, sref schemaRef) error {
	for i := range stack {
		if stack[i].schema == sref.schema {
			return infiniteLoopError(stack, sref)
		}
	}
	return nil
}
// keywordLocation builds the keyword location (a json-pointer-like string)
// for the current validation position: the concatenated paths of every stack
// entry except the root, optionally followed by path.
//
// Fix: guard the stack[1:] slice — slicing an empty slice at index 1 panics,
// so an empty stack now yields just "/"+path (or "").
func keywordLocation(stack []schemaRef, path string) string {
	var loc string
	if len(stack) > 0 {
		for _, ref := range stack[1:] {
			loc += "/" + ref.path
		}
	}
	if path != "" {
		loc += "/" + path
	}
	return loc
}

View file

@ -0,0 +1,29 @@
package jsonschema
import (
"encoding/base64"
"encoding/json"
)
// Decoders is a registry of functions, which know how to decode
// string encoded in specific format.
//
// New Decoders can be registered by adding to this map. Key is the encoding
// name (the value of the "contentEncoding" keyword), value is a function that
// knows how to decode a string in that format.
var Decoders = map[string]func(string) ([]byte, error){
	"base64": base64.StdEncoding.DecodeString,
}
// MediaTypes is a registry of functions, which know how to validate
// whether the bytes represent data of that mediaType.
//
// New mediaTypes can be registered by adding to this map. Key is the
// mediaType name (the value of the "contentMediaType" keyword), value is a
// function that knows how to validate that mediaType.
var MediaTypes = map[string]func([]byte) error{
	"application/json": validateJSON,
}
// validateJSON reports whether b holds syntactically valid JSON by
// attempting a full unmarshal into an untyped value.
func validateJSON(b []byte) error {
	var doc interface{}
	err := json.Unmarshal(b, &doc)
	return err
}

49
vendor/github.com/santhosh-tekuri/jsonschema/v5/doc.go generated vendored Normal file
View file

@ -0,0 +1,49 @@
/*
Package jsonschema provides json-schema compilation and validation.
Features:
- implements draft 2020-12, 2019-09, draft-7, draft-6, draft-4
- fully compliant with JSON-Schema-Test-Suite, (excluding some optional)
- list of optional tests that are excluded can be found in schema_test.go(variable skipTests)
- validates schemas against meta-schema
- full support of remote references
- support of recursive references between schemas
- detects infinite loop in schemas
- thread safe validation
- rich, intuitive hierarchical error messages with json-pointers to exact location
- supports output formats flag, basic and detailed
- supports enabling format and content Assertions in draft2019-09 or above
- change Compiler.AssertFormat, Compiler.AssertContent to true
- compiled schema can be introspected. easier to develop tools like generating go structs given schema
- supports user-defined keywords via extensions
- implements following formats (supports user-defined)
- date-time, date, time, duration (supports leap-second)
- uuid, hostname, email
- ip-address, ipv4, ipv6
- uri, uriref, uri-template(limited validation)
- json-pointer, relative-json-pointer
- regex, format
- implements following contentEncoding (supports user-defined)
- base64
- implements following contentMediaType (supports user-defined)
- application/json
- can load from files/http/https/string/[]byte/io.Reader (supports user-defined)
The schema is compiled against the version specified in "$schema" property.
If "$schema" property is missing, it uses the latest draft currently implemented
by this library.
You can force to use specific draft, when "$schema" is missing, as follows:
compiler := jsonschema.NewCompiler()
compiler.Draft = jsonschema.Draft4
This package supports loading json-schema from filePath and fileURL.
To load json-schema from HTTPURL, add following import:
import _ "github.com/santhosh-tekuri/jsonschema/v5/httploader"
you can validate yaml documents. see https://play.golang.org/p/sJy1qY7dXgA
*/
package jsonschema

1432
vendor/github.com/santhosh-tekuri/jsonschema/v5/draft.go generated vendored Normal file

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,129 @@
package jsonschema
import (
"fmt"
"strings"
)
// InvalidJSONTypeError is the error type returned by ValidateInterface.
// It tells that the specified go object is not a valid jsonType.
// The string value is the name of the offending Go type.
type InvalidJSONTypeError string
// Error implements the error interface.
func (e InvalidJSONTypeError) Error() string {
	return "jsonschema: invalid jsonType: " + string(e)
}
// InfiniteLoopError is returned by Compile/Validate.
// Its value is the url#keywordLocation that led to the infinite loop.
type InfiniteLoopError string
// Error implements the error interface.
func (e InfiniteLoopError) Error() string {
	return fmt.Sprintf("jsonschema: infinite loop %s", string(e))
}
// infiniteLoopError renders the loop location: the absolute location of the
// first stack entry (first non-empty, strictly), followed by the relative
// path of each later entry and, finally, of the offending ref itself.
func infiniteLoopError(stack []schemaRef, sref schemaRef) InfiniteLoopError {
	path := ""
	for _, ref := range stack {
		if path == "" {
			// still empty: take the absolute schema location
			path = ref.schema.Location
		} else {
			path = path + "/" + ref.path
		}
	}
	return InfiniteLoopError(path + "/" + sref.path)
}
// SchemaError is the error type returned by Compile.
type SchemaError struct {
	// SchemaURL is the url to the json-schema that failed to compile.
	// This is helpful if your schema refers to external schemas.
	SchemaURL string

	// Err is the error that occurred during compilation.
	// It could be a ValidationError, because compilation validates the
	// given schema against the json meta-schema.
	Err error
}
// Unwrap returns the underlying compilation error, enabling errors.Is/As.
func (se *SchemaError) Unwrap() error {
	return se.Err
}
// Error implements the error interface. The "jsonschema: " prefix of the
// wrapped error is stripped to avoid stuttering in the combined message.
func (se *SchemaError) Error() string {
	msg := "jsonschema " + se.SchemaURL + " compilation failed"
	if se.Err == nil {
		return msg
	}
	cause := strings.TrimPrefix(se.Err.Error(), "jsonschema: ")
	return msg + ": " + cause
}
// GoString renders the full cause hierarchy when the wrapped error is a
// *ValidationError; otherwise it falls back to Error().
func (se *SchemaError) GoString() string {
	ve, ok := se.Err.(*ValidationError)
	if !ok {
		return se.Error()
	}
	return fmt.Sprintf("jsonschema %s compilation failed\n%#v", se.SchemaURL, ve)
}
// ValidationError is the error type returned by Validate.
// It forms a tree: Causes holds nested failures, so one validation run can
// report every violated keyword.
type ValidationError struct {
	KeywordLocation         string             // validation path of validating keyword or schema
	AbsoluteKeywordLocation string             // absolute location of validating keyword or schema
	InstanceLocation        string             // location of the json value within the instance being validated
	Message                 string             // describes error
	Causes                  []*ValidationError // nested validation errors
}
// add appends the given errors (each must be a *ValidationError) as causes
// of ve and returns ve itself for chaining.
func (ve *ValidationError) add(causes ...error) error {
	for i := range causes {
		ve.Causes = append(ve.Causes, causes[i].(*ValidationError))
	}
	return ve
}
// causes absorbs err into ve: a message-less *ValidationError is a bare
// grouping node, so its children are adopted directly; anything else is
// attached as a single cause.
func (ve *ValidationError) causes(err error) error {
	cause := err.(*ValidationError)
	if cause.Message == "" {
		ve.Causes = cause.Causes
	} else {
		ve.add(cause)
	}
	return ve
}
// Error reports the deepest left-most leaf cause, which is usually the most
// specific description of what went wrong.
func (ve *ValidationError) Error() string {
	leaf := ve
	for len(leaf.Causes) != 0 {
		leaf = leaf.Causes[0]
	}
	u, _ := split(ve.AbsoluteKeywordLocation)
	loc := u + "#" + leaf.KeywordLocation
	return fmt.Sprintf("jsonschema: %s does not validate with %s: %s", quote(leaf.InstanceLocation), loc, leaf.Message)
}
// GoString renders ve and its whole cause tree, one error per line, each
// nested level indented, using "[I#instanceLocation] [S#schemaLocation]"
// prefixes (the schema location is the fragment after '#').
func (ve *ValidationError) GoString() string {
	sloc := ve.AbsoluteKeywordLocation
	sloc = sloc[strings.IndexByte(sloc, '#')+1:]
	msg := fmt.Sprintf("[I#%s] [S#%s] %s", ve.InstanceLocation, sloc, ve.Message)
	for _, c := range ve.Causes {
		for _, line := range strings.Split(c.GoString(), "\n") {
			msg += "\n " + line
		}
	}
	return msg
}
// joinPtr joins two json-pointer fragments with "/", treating an empty
// fragment as absent.
func joinPtr(ptr1, ptr2 string) string {
	switch {
	case ptr1 == "":
		return ptr2
	case ptr2 == "":
		return ptr1
	default:
		return ptr1 + "/" + ptr2
	}
}
// quote returns s as a single-quoted string: Go %q escaping is applied
// first, then double-quote escapes are undone and single quotes escaped,
// since the surrounding quotes become ' instead of ".
func quote(s string) string {
	q := fmt.Sprintf("%q", s)
	q = strings.ReplaceAll(q, `\"`, `"`)
	q = strings.ReplaceAll(q, `'`, `\'`)
	inner := q[1 : len(q)-1]
	return "'" + inner + "'"
}

View file

@ -0,0 +1,116 @@
package jsonschema
// ExtCompiler compiles custom keyword(s) into ExtSchema.
type ExtCompiler interface {
	// Compile compiles the custom keywords in schema m and returns its compiled representation.
	// if the schema m does not contain the keywords defined by this extension,
	// compiled representation nil should be returned.
	Compile(ctx CompilerContext, m map[string]interface{}) (ExtSchema, error)
}
// ExtSchema is the compiled schema representation of custom keyword(s).
type ExtSchema interface {
	// Validate validates the json value v with this ExtSchema.
	// Returned error must be *ValidationError.
	Validate(ctx ValidationContext, v interface{}) error
}
// extension pairs the meta-schema for a set of custom keywords (used to
// validate their usage before compiling; may be nil) with the compiler
// that understands them.
type extension struct {
	meta     *Schema     // meta-schema for the extension keywords; may be nil
	compiler ExtCompiler // compiles the keywords into an ExtSchema
}
// RegisterExtension registers custom keyword(s) into this compiler.
//
// name is the extension name, used only to avoid name collisions.
// meta captures the metaschema for the new keywords; it is used to validate
// the schema before calling ext.Compile.
func (c *Compiler) RegisterExtension(name string, meta *Schema, ext ExtCompiler) {
	c.extensions[name] = extension{meta: meta, compiler: ext}
}
// CompilerContext ---

// CompilerContext provides additional context required in compiling for extension.
type CompilerContext struct {
	c     *Compiler   // compiler running this compilation
	r     *resource   // resource passed through to compileRef
	stack []schemaRef // current compilation stack, for loop detection
	res   *resource   // resource whose document holds the extension keywords
}
// Compile compiles given value at ptr into *Schema. This is useful in implementing
// keywords like allOf/not/patternProperties.
//
// schPath is the relative-json-pointer to the schema to be compiled from parent schema.
//
// applicableOnSameInstance tells whether current schema and the given schema
// are applied on same instance value. this is used to detect infinite loop in schema.
func (ctx CompilerContext) Compile(schPath string, applicableOnSameInstance bool) (*Schema, error) {
	// carry the current stack only when both schemas validate the same
	// instance; otherwise loop detection must not link them
	var stack []schemaRef
	if applicableOnSameInstance {
		stack = ctx.stack
	}
	target := ctx.r.url + ctx.res.floc + "/" + schPath
	return ctx.c.compileRef(ctx.r, stack, schPath, ctx.res, target)
}
// CompileRef compiles the schema referenced by ref uri.
//
// refPath is the relative-json-pointer to ref.
//
// applicableOnSameInstance tells whether current schema and the given schema
// are applied on same instance value. this is used to detect infinite loop in schema.
func (ctx CompilerContext) CompileRef(ref string, refPath string, applicableOnSameInstance bool) (*Schema, error) {
	stack := []schemaRef(nil)
	if applicableOnSameInstance {
		// share the stack so loop detection spans both schemas
		stack = ctx.stack
	}
	return ctx.c.compileRef(ctx.r, stack, refPath, ctx.res, ref)
}
// ValidationContext ---

// ValidationContext provides additional context required in validating for extension.
type ValidationContext struct {
	result          validationResult                                                     // bookkeeping for unevaluated props/items
	validate        func(sch *Schema, schPath string, v interface{}, vpath string) error // validate v (at vpath) against sch
	validateInplace func(sch *Schema, schPath string) error                              // validate the current value against sch
	validationError func(keywordPath string, format string, a ...interface{}) *ValidationError
}
// EvaluatedProp marks given property of object as evaluated
// (removes it from the pending unevalProps set).
func (ctx ValidationContext) EvaluatedProp(prop string) {
	delete(ctx.result.unevalProps, prop)
}
// EvaluatedItem marks given index of array as evaluated
// (removes it from the pending unevalItems set).
func (ctx ValidationContext) EvaluatedItem(index int) {
	delete(ctx.result.unevalItems, index)
}
// Validate validates schema s with value v. Extensions must use this method
// instead of *Schema.ValidateInterface. This is useful in implementing
// keywords like allOf/oneOf.
//
// spath is the relative-json-pointer to s.
// vpath is the relative-json-pointer to v; "" means "validate in place".
func (ctx ValidationContext) Validate(s *Schema, spath string, v interface{}, vpath string) error {
	switch vpath {
	case "":
		return ctx.validateInplace(s, spath)
	default:
		return ctx.validate(s, spath, v, vpath)
	}
}
// Error is used to construct a validation error by extensions.
//
// keywordPath is the relative-json-pointer to the keyword;
// format/a are printf-style message arguments.
func (ctx ValidationContext) Error(keywordPath string, format string, a ...interface{}) *ValidationError {
	return ctx.validationError(keywordPath, format, a...)
}
// Group is used by extensions to group multiple errors as causes to parent error.
// This is useful in implementing keywords like allOf where each schema specified
// in allOf can result in a validationError.
//
// The receiver is unused; the method hangs off ValidationError purely for
// namespacing.
func (ValidationError) Group(parent *ValidationError, causes ...error) error {
	return parent.add(causes...)
}

View file

@ -0,0 +1,567 @@
package jsonschema
import (
"errors"
"net"
"net/mail"
"net/url"
"regexp"
"strconv"
"strings"
"time"
)
// Formats is a registry of functions, which know how to validate
// a specific format.
//
// New Formats can be registered by adding to this map. Key is format name,
// value is function that knows how to validate that format.
//
// Note: every validator here returns true for non-string input — format
// assertions apply only to strings.
var Formats = map[string]func(interface{}) bool{
	"date-time":             isDateTime,
	"date":                  isDate,
	"time":                  isTime,
	"duration":              isDuration,
	"period":                isPeriod,
	"hostname":              isHostname,
	"email":                 isEmail,
	"ip-address":            isIPV4,
	"ipv4":                  isIPV4,
	"ipv6":                  isIPV6,
	"uri":                   isURI,
	"iri":                   isURI,
	"uri-reference":         isURIReference,
	"uriref":                isURIReference,
	"iri-reference":         isURIReference,
	"uri-template":          isURITemplate,
	"regex":                 isRegex,
	"json-pointer":          isJSONPointer,
	"relative-json-pointer": isRelativeJSONPointer,
	"uuid":                  isUUID,
}
// isDateTime tells whether given string is a valid date representation
// as defined by RFC 3339, section 5.6: a full-date, 'T' (or 't'), then a
// full-time.
//
// see https://datatracker.ietf.org/doc/html/rfc3339#section-5.6, for details
func isDateTime(v interface{}) bool {
	s, ok := v.(string)
	if !ok {
		return true // format applies to strings only
	}
	// minimal shape: "yyyy-mm-ddThh:mm:ssZ" is 20 characters
	if len(s) < 20 {
		return false
	}
	sep := s[10]
	if sep != 'T' && sep != 't' {
		return false
	}
	return isDate(s[:10]) && isTime(s[11:])
}
// isDate tells whether given string is a valid full-date production
// as defined by RFC 3339, section 5.6 (delegates to time.Parse, which also
// rejects impossible calendar dates).
//
// see https://datatracker.ietf.org/doc/html/rfc3339#section-5.6, for details
func isDate(v interface{}) bool {
	s, ok := v.(string)
	if !ok {
		return true // format applies to strings only
	}
	if _, err := time.Parse("2006-01-02", s); err != nil {
		return false
	}
	return true
}
// isTime tells whether given string is a valid full-time production
// as defined by RFC 3339, section 5.6.
//
// see https://datatracker.ietf.org/doc/html/rfc3339#section-5.6, for details
func isTime(v interface{}) bool {
	str, ok := v.(string)
	if !ok {
		return true // format applies to strings only
	}

	// golang time package does not support leap seconds.
	// so we are parsing it manually here.

	// hh:mm:ss
	// 01234567
	if len(str) < 9 || str[2] != ':' || str[5] != ':' {
		return false
	}

	// isInRange parses str as an integer and checks it lies in [min, max]
	isInRange := func(str string, min, max int) (int, bool) {
		n, err := strconv.Atoi(str)
		if err != nil {
			return 0, false
		}
		if n < min || n > max {
			return 0, false
		}
		return n, true
	}
	var h, m, s int
	if h, ok = isInRange(str[0:2], 0, 23); !ok {
		return false
	}
	if m, ok = isInRange(str[3:5], 0, 59); !ok {
		return false
	}
	if s, ok = isInRange(str[6:8], 0, 60); !ok { // 60 allowed: leap second
		return false
	}
	str = str[8:]

	// parse secfrac if present
	if str[0] == '.' {
		// dot must be followed by at least one digit
		str = str[1:]
		var numDigits int
		for str != "" {
			if str[0] < '0' || str[0] > '9' {
				break
			}
			numDigits++
			str = str[1:]
		}
		if numDigits == 0 {
			return false
		}
	}

	// a time offset ("Z"/"z" or +hh:mm / -hh:mm) is mandatory
	if len(str) == 0 {
		return false
	}

	if str[0] == 'z' || str[0] == 'Z' {
		if len(str) != 1 {
			return false
		}
	} else {
		// time-numoffset
		// +hh:mm
		// 012345
		if len(str) != 6 || str[3] != ':' {
			return false
		}

		// sign is inverted because the offset is SUBTRACTED below to
		// normalize h:m to UTC for the leap-second check
		var sign int
		if str[0] == '+' {
			sign = -1
		} else if str[0] == '-' {
			sign = +1
		} else {
			return false
		}
		var zh, zm int
		if zh, ok = isInRange(str[1:3], 0, 23); !ok {
			return false
		}
		if zm, ok = isInRange(str[4:6], 0, 59); !ok {
			return false
		}

		// apply timezone offset (wrap negative results into the same day)
		hm := (h*60 + m) + sign*(zh*60+zm)
		if hm < 0 {
			hm += 24 * 60
		}
		h, m = hm/60, hm%60
	}

	// check leapsecond: second 60 is only valid at 23:59 UTC
	if s == 60 { // leap second
		if h != 23 || m != 59 {
			return false
		}
	}

	return true
}
// isDuration tells whether given string is a valid duration format
// from the ISO 8601 ABNF as given in Appendix A of RFC 3339.
//
// see https://datatracker.ietf.org/doc/html/rfc3339#appendix-A, for details
func isDuration(v interface{}) bool {
	s, ok := v.(string)
	if !ok {
		return true // format applies to strings only
	}
	if len(s) == 0 || s[0] != 'P' {
		return false
	}
	s = s[1:]

	// parseUnits consumes digit+unit pairs from s (stopping at 'T') and
	// returns the unit letters seen, in order
	parseUnits := func() (units string, ok bool) {
		for len(s) > 0 && s[0] != 'T' {
			digits := false
			for {
				if len(s) == 0 {
					break
				}
				if s[0] < '0' || s[0] > '9' {
					break
				}
				digits = true
				s = s[1:]
			}
			if !digits || len(s) == 0 {
				return units, false
			}
			units += s[:1]
			s = s[1:]
		}
		return units, true
	}

	// date part: unit letters must appear as a substring of "YMD"
	// (i.e. in order and contiguous)
	units, ok := parseUnits()
	if !ok {
		return false
	}
	if units == "W" {
		return len(s) == 0 // P_W: weeks cannot combine with other units
	}
	if len(units) > 0 {
		if strings.Index("YMD", units) == -1 {
			return false
		}
		if len(s) == 0 {
			return true // "P" dur-date
		}
	}

	// time part: mandatory from here; units must be a substring of "HMS"
	if len(s) == 0 || s[0] != 'T' {
		return false
	}
	s = s[1:]
	units, ok = parseUnits()
	return ok && len(s) == 0 && len(units) > 0 && strings.Index("HMS", units) != -1
}
// isPeriod tells whether given string is a valid period format from the
// ISO 8601 ABNF as given in Appendix A of RFC 3339: two slash-separated
// parts, where at least one is a date-time and the other may be a duration.
//
// see https://datatracker.ietf.org/doc/html/rfc3339#appendix-A, for details
func isPeriod(v interface{}) bool {
	s, ok := v.(string)
	if !ok {
		return true // format applies to strings only
	}
	slash := strings.IndexByte(s, '/')
	if slash < 0 {
		return false
	}
	start := s[:slash]
	end := s[slash+1:]
	if isDateTime(start) {
		// start/end or start/duration
		return isDateTime(end) || isDuration(end)
	}
	// duration/end
	return isDuration(start) && isDateTime(end)
}
// isHostname tells whether given string is a valid representation
// for an Internet host name, as defined by RFC 1034 section 3.1 and
// RFC 1123 section 2.1.
//
// See https://en.wikipedia.org/wiki/Hostname#Restrictions_on_valid_host_names, for details.
func isHostname(v interface{}) bool {
	s, ok := v.(string)
	if !ok {
		return true // format applies to strings only
	}
	// entire hostname (including the delimiting dots but not a trailing dot)
	// has a maximum of 253 ASCII characters
	s = strings.TrimSuffix(s, ".")
	if len(s) > 253 {
		return false
	}

	// Hostnames are composed of series of labels concatenated with dots, as are all domain names
	for _, label := range strings.Split(s, ".") {
		// Each label must be from 1 to 63 characters long
		if labelLen := len(label); labelLen < 1 || labelLen > 63 {
			return false
		}

		// labels must not start with a hyphen; RFC 1123 section 2.1 relaxes
		// the first character to allow either a letter or a digit.
		// Fix: inspect the label, not the whole hostname — the previous
		// `s[0]` check let inner labels like "a.-b" slip through.
		if label[0] == '-' {
			return false
		}

		// must not end with a hyphen
		if label[len(label)-1] == '-' {
			return false
		}

		// labels may contain only the ASCII letters 'a' through 'z' (in a
		// case-insensitive manner), the digits '0' through '9', and the hyphen
		for _, c := range label {
			if valid := (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || (c >= '0' && c <= '9') || (c == '-'); !valid {
				return false
			}
		}
	}

	return true
}
// isEmail tells whether given string is a valid Internet email address
// as defined by RFC 5322, section 3.4.1.
//
// See https://en.wikipedia.org/wiki/Email_address, for details.
func isEmail(v interface{}) bool {
	s, ok := v.(string)
	if !ok {
		return true // format applies to strings only
	}
	// entire email address to be no more than 254 characters long
	if len(s) > 254 {
		return false
	}

	// email address is generally recognized as having two parts joined with an at-sign
	at := strings.LastIndexByte(s, '@')
	if at == -1 {
		return false
	}
	local := s[0:at]
	domain := s[at+1:]

	// local part may be up to 64 characters long
	if len(local) > 64 {
		return false
	}

	// domain if enclosed in brackets, must match an IP address
	// (optionally prefixed "IPv6:" for an IPv6 literal)
	if len(domain) >= 2 && domain[0] == '[' && domain[len(domain)-1] == ']' {
		ip := domain[1 : len(domain)-1]
		if strings.HasPrefix(ip, "IPv6:") {
			return isIPV6(strings.TrimPrefix(ip, "IPv6:"))
		}
		return isIPV4(ip)
	}

	// domain must match the requirements for a hostname
	if !isHostname(domain) {
		return false
	}

	// delegate the full address grammar (quoting, dot-atoms, ...) to net/mail
	_, err := mail.ParseAddress(s)
	return err == nil
}
// isIPV4 tells whether given string is a valid representation of an IPv4 address
// according to the "dotted-quad" ABNF syntax as defined in RFC 2673, section 3.2.
//
// Fixes over the previous version: octets are now required to be bare decimal
// digits (strconv.Atoi accepts signed forms like "+1"/"-0"), and the
// leading-zero rejection now also covers all-zero forms such as "00"/"000",
// matching the stated intent of rejecting octal-looking octets.
func isIPV4(v interface{}) bool {
	s, ok := v.(string)
	if !ok {
		return true // format applies to strings only
	}
	groups := strings.Split(s, ".")
	if len(groups) != 4 {
		return false
	}
	for _, group := range groups {
		// each octet is 1-3 decimal digits
		if len(group) == 0 || len(group) > 3 {
			return false
		}
		for i := 0; i < len(group); i++ {
			if group[i] < '0' || group[i] > '9' {
				return false
			}
		}
		// leading zeroes should be rejected, as they are treated as octals
		if len(group) > 1 && group[0] == '0' {
			return false
		}
		n, err := strconv.Atoi(group)
		if err != nil || n > 255 {
			return false
		}
	}
	return true
}
// isIPV6 tells whether given string is a valid representation of an IPv6
// address as defined in RFC 2373, section 2.2.
func isIPV6(v interface{}) bool {
	s, ok := v.(string)
	if !ok {
		return true // format applies to strings only
	}
	// require at least one colon so dotted-quad IPv4 strings don't pass
	if strings.IndexByte(s, ':') < 0 {
		return false
	}
	return net.ParseIP(s) != nil
}
// isURI tells whether given string is a valid URI, according to RFC 3986:
// it must parse and carry a scheme (be absolute).
func isURI(v interface{}) bool {
	s, ok := v.(string)
	if !ok {
		return true // format applies to strings only
	}
	u, err := urlParse(s)
	if err != nil {
		return false
	}
	return u.IsAbs()
}
// urlParse wraps url.Parse with extra validation for IPv6 hosts: a hostname
// containing ':' must be enclosed in brackets in the authority and must be a
// valid IPv6 address.
func urlParse(s string) (*url.URL, error) {
	u, err := url.Parse(s)
	if err != nil {
		return nil, err
	}
	host := u.Hostname()
	if strings.IndexByte(host, ':') == -1 {
		return u, nil // not ipv6, nothing more to check
	}
	bracketed := strings.IndexByte(u.Host, '[') != -1 && strings.IndexByte(u.Host, ']') != -1
	if !bracketed {
		return nil, errors.New("ipv6 address is not enclosed in brackets")
	}
	if !isIPV6(host) {
		return nil, errors.New("invalid ipv6 address")
	}
	return u, nil
}
// isURIReference tells whether given string is a valid URI Reference
// (either a URI or a relative-reference), according to RFC 3986.
// Backslashes are rejected explicitly since url.Parse tolerates them.
func isURIReference(v interface{}) bool {
	s, ok := v.(string)
	if !ok {
		return true // format applies to strings only
	}
	if strings.Contains(s, `\`) {
		return false
	}
	_, err := urlParse(s)
	return err == nil
}
// isURITemplate tells whether given string is a valid URI Template
// according to RFC6570.
//
// Current implementation does minimal validation: it only checks that braces
// within each path segment are balanced and not nested.
func isURITemplate(v interface{}) bool {
	s, ok := v.(string)
	if !ok {
		return true // format applies to strings only
	}
	u, err := urlParse(s)
	if err != nil {
		return false
	}
	// NOTE(review): u.RawPath is only populated when the path required
	// escaping; otherwise it is "" and the loop below inspects a single
	// empty segment — confirm whether u.Path was intended here.
	for _, item := range strings.Split(u.RawPath, "/") {
		depth := 0
		for _, ch := range item {
			switch ch {
			case '{':
				depth++
				if depth != 1 {
					return false // nested "{" not allowed
				}
			case '}':
				depth--
				if depth != 0 {
					return false // unbalanced "}"
				}
			}
		}
		if depth != 0 {
			return false // "{" left open at end of segment
		}
	}
	return true
}
// isRegex tells whether given string is a valid regular expression,
// according to the ECMA 262 regular expression dialect.
//
// The implementation uses the go-lang regexp package.
func isRegex(v interface{}) bool {
	s, ok := v.(string)
	if !ok {
		return true // format applies to strings only
	}
	if _, err := regexp.Compile(s); err != nil {
		return false
	}
	return true
}
// isJSONPointer tells whether given string is a valid JSON Pointer:
// empty, or "/"-prefixed reference tokens where every '~' is followed by
// '0' or '1'.
//
// Note: It returns false for JSON Pointer URI fragments.
func isJSONPointer(v interface{}) bool {
	s, ok := v.(string)
	if !ok {
		return true // format applies to strings only
	}
	if s != "" && !strings.HasPrefix(s, "/") {
		return false
	}
	for _, token := range strings.Split(s, "/") {
		for i := 0; i < len(token); i++ {
			if token[i] != '~' {
				continue
			}
			// '~' must be followed by '0' or '1'
			if i+1 >= len(token) {
				return false
			}
			if next := token[i+1]; next != '0' && next != '1' {
				return false
			}
		}
	}
	return true
}
// isRelativeJSONPointer tells whether given string is a valid Relative JSON
// Pointer: a non-negative integer prefix (a lone "0", or digits starting
// 1-9) followed by "#" or a JSON Pointer.
//
// see https://tools.ietf.org/html/draft-handrews-relative-json-pointer-01#section-3
func isRelativeJSONPointer(v interface{}) bool {
	s, ok := v.(string)
	if !ok {
		return true // format applies to strings only
	}
	if len(s) == 0 {
		return false
	}
	var rest string
	switch {
	case s[0] == '0':
		// a leading zero stands alone; further digits make it invalid below
		rest = s[1:]
	case s[0] >= '1' && s[0] <= '9':
		i := 0
		for i < len(s) && s[i] >= '0' && s[i] <= '9' {
			i++
		}
		rest = s[i:]
	default:
		return false
	}
	return rest == "#" || isJSONPointer(rest)
}
// isUUID tells whether given string is a valid uuid format
// as specified in RFC4122.
//
// see https://datatracker.ietf.org/doc/html/rfc4122#page-4, for details
func isUUID(v interface{}) bool {
	str, ok := v.(string)
	if !ok {
		return true
	}
	isHexDigit := func(b byte) bool {
		return (b >= '0' && b <= '9') || (b >= 'a' && b <= 'f') || (b >= 'A' && b <= 'F')
	}
	// 8-4-4-4-12 hex digits, with '-' between groups
	for gi, size := range []int{8, 4, 4, 4, 12} {
		if gi > 0 {
			if len(str) == 0 || str[0] != '-' {
				return false
			}
			str = str[1:]
		}
		if len(str) < size {
			return false
		}
		for i := 0; i < size; i++ {
			if !isHexDigit(str[i]) {
				return false
			}
		}
		str = str[size:]
	}
	// nothing may follow the last group
	return len(str) == 0
}

View file

@ -0,0 +1,38 @@
// Package httploader implements loader.Loader for http/https url.
//
// The package is typically only imported for the side effect of
// registering its Loaders.
//
// To use httploader, link this package into your program:
//
// import _ "github.com/santhosh-tekuri/jsonschema/v5/httploader"
package httploader
import (
"fmt"
"io"
"net/http"
"github.com/santhosh-tekuri/jsonschema/v5"
)
// Client is the default HTTP Client used to Get the resource.
var Client = http.DefaultClient
// Load loads resource from given http(s) url.
// Any response other than 200 OK is treated as an error; in that case the
// response body is closed before returning.
func Load(url string) (io.ReadCloser, error) {
	resp, err := Client.Get(url)
	if err != nil {
		return nil, err
	}
	if resp.StatusCode == http.StatusOK {
		// caller owns the body and must close it
		return resp.Body, nil
	}
	_ = resp.Body.Close()
	return nil, fmt.Errorf("%s returned status code %d", url, resp.StatusCode)
}
// init registers Load for the "http" and "https" url schemes, so that
// merely importing this package (including a blank import) enables
// loading of remote schemas.
func init() {
	jsonschema.Loaders["http"] = Load
	jsonschema.Loaders["https"] = Load
}

View file

@ -0,0 +1,60 @@
package jsonschema
import (
"fmt"
"io"
"net/url"
"os"
"path/filepath"
"runtime"
"strings"
)
// loadFileURL opens the local file referenced by a file:// url.
func loadFileURL(s string) (io.ReadCloser, error) {
	u, err := url.Parse(s)
	if err != nil {
		return nil, err
	}
	name := u.Path
	if runtime.GOOS == "windows" {
		// "/C:/dir/file" -> `C:\dir\file`
		name = filepath.FromSlash(strings.TrimPrefix(name, "/"))
	}
	return os.Open(name)
}
// Loaders is a registry of functions, which know how to load
// absolute url of specific schema.
//
// New loaders can be registered by adding to this map. Key is schema,
// value is function that knows how to load url of that schema.
//
// NOTE(review): this is a plain map — register custom loaders before any
// concurrent use (e.g. from an init function), since concurrent map
// writes are unsafe in Go.
var Loaders = map[string]func(url string) (io.ReadCloser, error){
	"file": loadFileURL,
}
// LoaderNotFoundError is the error type returned by Load function.
// It tells that no Loader is registered for that URL Scheme.
// The string value is the url that could not be loaded.
type LoaderNotFoundError string

// Error implements the error interface.
func (e LoaderNotFoundError) Error() string {
	return "jsonschema: no Loader found for " + string(e)
}
// LoadURL loads document at given absolute URL. The default implementation
// uses Loaders registry to lookup by schema and uses that loader.
//
// Users can change this variable, if they would like to take complete
// responsibility of loading given URL. Used by Compiler if its LoadURL
// field is nil.
var LoadURL = func(s string) (io.ReadCloser, error) {
	u, err := url.Parse(s)
	if err != nil {
		return nil, err
	}
	if loader, ok := Loaders[u.Scheme]; ok {
		return loader(s)
	}
	return nil, LoaderNotFoundError(s)
}

View file

@ -0,0 +1,77 @@
package jsonschema
// Flag is output format with simple boolean property valid.
type Flag struct {
	Valid bool `json:"valid"`
}

// FlagOutput returns output in flag format
func (ve *ValidationError) FlagOutput() Flag {
	// a ValidationError always represents a failed validation, so the
	// zero value (Valid: false) is the correct output.
	return Flag{}
}
// Basic ---

// Basic is output format with flat list of output units.
// NOTE: BasicOutput builds this from a ValidationError, so Valid is
// always left false.
type Basic struct {
	Valid  bool         `json:"valid"`
	Errors []BasicError `json:"errors"`
}

// BasicError is output unit in basic format.
type BasicError struct {
	KeywordLocation         string `json:"keywordLocation"`
	AbsoluteKeywordLocation string `json:"absoluteKeywordLocation"`
	InstanceLocation        string `json:"instanceLocation"`
	Error                   string `json:"error"`
}
// BasicOutput returns output in basic format: the error tree flattened
// into a single depth-first list of output units.
func (ve *ValidationError) BasicOutput() Basic {
	var units []BasicError
	var collect func(*ValidationError)
	collect = func(e *ValidationError) {
		units = append(units, BasicError{
			KeywordLocation:         e.KeywordLocation,
			AbsoluteKeywordLocation: e.AbsoluteKeywordLocation,
			InstanceLocation:        e.InstanceLocation,
			Error:                   e.Message,
		})
		for _, cause := range e.Causes {
			collect(cause)
		}
	}
	collect(ve)
	return Basic{Errors: units}
}
// Detailed ---

// Detailed is output format based on structure of schema.
// Leaf nodes carry Error; interior nodes carry Errors.
// NOTE: DetailedOutput builds this from a ValidationError, so Valid is
// always left false.
type Detailed struct {
	Valid                   bool       `json:"valid"`
	KeywordLocation         string     `json:"keywordLocation"`
	AbsoluteKeywordLocation string     `json:"absoluteKeywordLocation"`
	InstanceLocation        string     `json:"instanceLocation"`
	Error                   string     `json:"error,omitempty"`
	Errors                  []Detailed `json:"errors,omitempty"`
}
// DetailedOutput returns output in detailed format, mirroring the
// cause tree of the ValidationError. The message is reported only on
// leaf nodes (nodes without causes).
func (ve *ValidationError) DetailedOutput() Detailed {
	out := Detailed{
		KeywordLocation:         ve.KeywordLocation,
		AbsoluteKeywordLocation: ve.AbsoluteKeywordLocation,
		InstanceLocation:        ve.InstanceLocation,
	}
	for _, cause := range ve.Causes {
		out.Errors = append(out.Errors, cause.DetailedOutput())
	}
	if len(ve.Causes) == 0 {
		out.Error = ve.Message
	}
	return out
}

View file

@ -0,0 +1,280 @@
package jsonschema
import (
"encoding/json"
"fmt"
"io"
"net/url"
"path/filepath"
"runtime"
"strconv"
"strings"
)
// resource is a json document (or a sub-document of it) addressed by url
// and/or by json-pointer location within its root resource.
type resource struct {
	url          string               // base url of resource. can be empty
	floc         string               // fragment with json-pointer from root resource
	doc          interface{}          // the parsed json (sub-)document
	draft        *Draft               // json-schema draft in effect for this resource
	subresources map[string]*resource // key is floc. only applicable for root resource
	schema       *Schema              // compiled form of doc (presumably filled during compilation — not shown in this view)
}

// String returns the canonical identity of the resource: base url + fragment.
func (r *resource) String() string {
	return r.url + r.floc
}
// newResource creates a root resource for the json document read from r.
// url must be fragment-free; it is normalized to an absolute url.
func newResource(url string, r io.Reader) (*resource, error) {
	if strings.ContainsRune(url, '#') {
		// programming error in the caller, not user input
		panic(fmt.Sprintf("BUG: newResource(%q)", url))
	}
	doc, err := unmarshal(r)
	if err != nil {
		return nil, fmt.Errorf("jsonschema: invalid json %s: %v", url, err)
	}
	abs, err := toAbs(url)
	if err != nil {
		return nil, err
	}
	return &resource{url: abs, floc: "#", doc: doc}, nil
}
// fillSubschemas fills subschemas in res into r.subresources.
// It first validates res.doc against the draft's metaschema, then lets the
// draft enumerate subschemas, and finally verifies that no two subresources
// claim the same canonical uri.
func (r *resource) fillSubschemas(c *Compiler, res *resource) error {
	if err := c.validateSchema(r, res.doc, res.floc[1:]); err != nil {
		return err
	}
	if r.subresources == nil {
		r.subresources = make(map[string]*resource)
	}
	if err := r.draft.listSubschemas(res, r.baseURL(res.floc), r.subresources); err != nil {
		return err
	}

	// ensure subresource.url uniqueness
	url2floc := make(map[string]string)
	for _, sr := range r.subresources {
		if sr.url != "" {
			if floc, ok := url2floc[sr.url]; ok {
				return fmt.Errorf("jsonschema: %q and %q in %s have same canonical-uri", floc[1:], sr.floc[1:], r.url)
			}
			url2floc[sr.url] = sr.floc
		}
	}

	return nil
}
// listResources lists all subresources nested (at any depth) under res.
func (r *resource) listResources(res *resource) []*resource {
	prefix := res.floc + "/"
	var nested []*resource
	for _, sub := range r.subresources {
		if strings.HasPrefix(sub.floc, prefix) {
			nested = append(nested, sub)
		}
	}
	return nested
}
// findResource returns the resource (r itself or one of its subresources)
// whose canonical url matches, or nil if none does.
func (r *resource) findResource(url string) *resource {
	if r.url == url {
		return r
	}
	for _, sub := range r.subresources {
		if sub.url == url {
			return sub
		}
	}
	return nil
}
// resolveFragment resolves fragment f with sr as base.
// The fragment is either an anchor ("#name") or a json-pointer ("#/a/b").
// It returns (nil, nil) when the fragment cannot be resolved.
func (r *resource) resolveFragment(c *Compiler, sr *resource, f string) (*resource, error) {
	if f == "#" || f == "#/" {
		return sr, nil
	}

	// resolve by anchor
	if !strings.HasPrefix(f, "#/") {
		// check in given resource
		for _, anchor := range r.draft.anchors(sr.doc) {
			if anchor == f[1:] {
				return sr, nil
			}
		}

		// check in subresources that has same base url
		prefix := sr.floc + "/"
		for _, res := range r.subresources {
			if strings.HasPrefix(res.floc, prefix) && r.baseURL(res.floc) == sr.url {
				for _, anchor := range r.draft.anchors(res.doc) {
					if anchor == f[1:] {
						return res, nil
					}
				}
			}
		}
		return nil, nil
	}

	// resolve by ptr
	floc := sr.floc + f[1:]
	if res, ok := r.subresources[floc]; ok {
		return res, nil
	}

	// non-standard location: walk the json-pointer through the raw document
	doc := r.doc
	for _, item := range strings.Split(floc[2:], "/") {
		// undo json-pointer escaping (~1 before ~0 would be wrong; order matters)
		item = strings.Replace(item, "~1", "/", -1)
		item = strings.Replace(item, "~0", "~", -1)
		item, err := url.PathUnescape(item)
		if err != nil {
			return nil, err
		}
		switch d := doc.(type) {
		case map[string]interface{}:
			if _, ok := d[item]; !ok {
				return nil, nil
			}
			doc = d[item]
		case []interface{}:
			index, err := strconv.Atoi(item)
			if err != nil {
				return nil, err
			}
			if index < 0 || index >= len(d) {
				return nil, nil
			}
			doc = d[index]
		default:
			return nil, nil
		}
	}

	// register the newly discovered subresource and its subschemas
	id, err := r.draft.resolveID(r.baseURL(floc), doc)
	if err != nil {
		return nil, err
	}
	res := &resource{url: id, floc: floc, doc: doc}
	r.subresources[floc] = res
	if err := r.fillSubschemas(c, res); err != nil {
		return nil, err
	}
	return res, nil
}
// baseURL returns the base url in effect at json-pointer location floc:
// it walks the pointer upwards until it finds a subresource with an
// explicit url, falling back to the root resource url.
func (r *resource) baseURL(floc string) string {
	for {
		if sr, ok := r.subresources[floc]; ok && sr.url != "" {
			return sr.url
		}
		slash := strings.LastIndexByte(floc, '/')
		if slash == -1 {
			return r.url
		}
		floc = floc[:slash]
	}
}
// url helpers ---
// toAbs converts s — either a url or a filesystem path — to an absolute url.
func toAbs(s string) (string, error) {
	// if windows absolute file path, convert to file url
	// because: net/url parses drive name as scheme
	if runtime.GOOS == "windows" && len(s) >= 3 && s[1:3] == `:\` {
		s = "file:///" + filepath.ToSlash(s)
	}

	u, err := url.Parse(s)
	if err != nil {
		return "", err
	}
	if u.IsAbs() {
		return s, nil
	}

	// s is filepath
	if s, err = filepath.Abs(s); err != nil {
		return "", err
	}
	if runtime.GOOS == "windows" {
		s = "file:///" + filepath.ToSlash(s)
	} else {
		s = "file://" + s
	}
	u, err = url.Parse(s) // to fix spaces in filepath
	return u.String(), err
}
// resolveURL resolves ref against base, per RFC 3986 reference resolution,
// with special handling for urn: identifiers (which net/url cannot resolve).
func resolveURL(base, ref string) (string, error) {
	switch {
	case ref == "":
		return base, nil
	case strings.HasPrefix(ref, "urn:"):
		// urns are already absolute
		return ref, nil
	}

	refURL, err := url.Parse(ref)
	if err != nil {
		return "", err
	}
	if refURL.IsAbs() {
		return ref, nil
	}

	if strings.HasPrefix(base, "urn:") {
		// drop any fragment from the urn and append the (relative) ref
		base, _ = split(base)
		return base + ref, nil
	}

	baseURL, err := url.Parse(base)
	if err != nil {
		return "", err
	}
	return baseURL.ResolveReference(refURL).String(), nil
}
// split separates uri into its base and fragment parts. A missing
// fragment is reported as "#", and "#/" is normalized to "#".
func split(uri string) (string, string) {
	hash := strings.IndexByte(uri, '#')
	if hash == -1 {
		return uri, "#"
	}
	frag := uri[hash:]
	if frag == "#/" {
		frag = "#"
	}
	return uri[:hash], frag
}
// url returns the base-url part of the schema's location (fragment removed).
func (s *Schema) url() string {
	base, _ := split(s.Location)
	return base
}
// loc returns the json-pointer part of the schema's location,
// with the leading '#' stripped.
func (s *Schema) loc() string {
	_, frag := split(s.Location)
	return frag[1:]
}
func unmarshal(r io.Reader) (interface{}, error) {
decoder := json.NewDecoder(r)
decoder.UseNumber()
var doc interface{}
if err := decoder.Decode(&doc); err != nil {
return nil, err
}
if t, _ := decoder.Token(); t != nil {
return nil, fmt.Errorf("invalid character %v after top-level value", t)
}
return doc, nil
}

View file

@ -0,0 +1,826 @@
package jsonschema
import (
"bytes"
"encoding/json"
"fmt"
"math/big"
"net/url"
"regexp"
"strconv"
"strings"
"unicode/utf8"
)
// A Schema represents compiled version of json-schema.
type Schema struct {
	Location string // absolute location

	meta           *Schema   // presumably the compiled metaschema — populated by the compiler (not shown here)
	vocab          []string  // vocabulary urls in effect; consulted by hasVocab
	dynamicAnchors []*Schema // subschemas declaring $dynamicAnchor; used for $dynamicRef resolution

	// type agnostic validations
	Format          string
	format          func(interface{}) bool // checker for Format; nil when format is not enforced
	Always          *bool                  // always pass/fail. used when booleans are used as schemas in draft-07.
	Ref             *Schema
	RecursiveAnchor bool
	RecursiveRef    *Schema
	DynamicAnchor   string
	DynamicRef      *Schema
	Types           []string      // allowed types.
	Constant        []interface{} // first element in slice is constant value. note: slice is used to capture nil constant.
	Enum            []interface{} // allowed values.
	enumError       string        // error message for enum fail. captured here to avoid constructing error message every time.
	Not             *Schema
	AllOf           []*Schema
	AnyOf           []*Schema
	OneOf           []*Schema
	If              *Schema
	Then            *Schema // nil, when If is nil.
	Else            *Schema // nil, when If is nil.

	// object validations
	MinProperties         int      // -1 if not specified.
	MaxProperties         int      // -1 if not specified.
	Required              []string // list of required properties.
	Properties            map[string]*Schema
	PropertyNames         *Schema
	RegexProperties       bool // property names must be valid regex. used only in draft4 as workaround in metaschema.
	PatternProperties     map[*regexp.Regexp]*Schema
	AdditionalProperties  interface{}            // nil or bool or *Schema.
	Dependencies          map[string]interface{} // map value is *Schema or []string.
	DependentRequired     map[string][]string
	DependentSchemas      map[string]*Schema
	UnevaluatedProperties *Schema

	// array validations
	MinItems        int // -1 if not specified.
	MaxItems        int // -1 if not specified.
	UniqueItems     bool
	Items           interface{} // nil or *Schema or []*Schema
	AdditionalItems interface{} // nil or bool or *Schema.
	PrefixItems     []*Schema
	Items2020       *Schema // items keyword reintroduced in draft 2020-12
	Contains        *Schema
	ContainsEval    bool // whether any item in an array that passes validation of the contains schema is considered "evaluated"
	MinContains     int  // 1 if not specified
	MaxContains     int  // -1 if not specified
	UnevaluatedItems *Schema

	// string validations
	MinLength        int // -1 if not specified.
	MaxLength        int // -1 if not specified.
	Pattern          *regexp.Regexp
	ContentEncoding  string
	decoder          func(string) ([]byte, error) // decodes a value per ContentEncoding; nil when not checked
	ContentMediaType string
	mediaType        func([]byte) error // validates decoded content per ContentMediaType; nil when not checked
	ContentSchema    *Schema

	// number validators
	Minimum          *big.Rat
	ExclusiveMinimum *big.Rat
	Maximum          *big.Rat
	ExclusiveMaximum *big.Rat
	MultipleOf       *big.Rat

	// annotations. captured only when Compiler.ExtractAnnotations is true.
	Title       string
	Description string
	Default     interface{}
	Comment     string
	ReadOnly    bool
	WriteOnly   bool
	Examples    []interface{}
	Deprecated  bool

	// user defined extensions
	Extensions map[string]ExtSchema
}
// String returns the absolute location of the schema.
func (s *Schema) String() string {
	return s.Location
}
// newSchema creates a schema at url+floc with every numeric bound set to
// its "not specified" sentinel, and captures $recursiveAnchor /
// $dynamicAnchor from the raw document when present.
func newSchema(url, floc string, doc interface{}) *Schema {
	s := &Schema{
		Location:      url + floc,
		MinProperties: -1,
		MaxProperties: -1,
		MinItems:      -1,
		MaxItems:      -1,
		MinContains:   1, // per spec, minContains defaults to 1
		MaxContains:   -1,
		MinLength:     -1,
		MaxLength:     -1,
	}

	m, ok := doc.(map[string]interface{})
	if !ok {
		return s
	}
	// a missing key or wrong type simply leaves the field at its zero value
	if ra, ok := m["$recursiveAnchor"].(bool); ok {
		s.RecursiveAnchor = ra
	}
	if da, ok := m["$dynamicAnchor"].(string); ok {
		s.DynamicAnchor = da
	}
	return s
}
// hasVocab reports whether the named vocabulary (2019-09 or 2020-12) is
// enabled for this schema. The core vocabulary is always enabled, and a
// nil schema (during bootstrap) enables everything.
func (s *Schema) hasVocab(name string) bool {
	if s == nil { // during bootstrap
		return true
	}
	if name == "core" {
		return true
	}
	for _, v := range s.vocab {
		switch v {
		case "https://json-schema.org/draft/2019-09/vocab/" + name,
			"https://json-schema.org/draft/2020-12/vocab/" + name:
			return true
		}
	}
	return false
}
// Validate validates given doc, against the json-schema s.
//
// the v must be the raw json value. for number precision
// unmarshal with json.UseNumber().
//
// returns *ValidationError if v does not conform with schema s.
// returns InfiniteLoopError if it detects loop during validation.
// returns InvalidJSONTypeError if it detects any non json value in v.
func (s *Schema) Validate(v interface{}) (err error) {
	return s.validateValue(v, "")
}
// validateValue validates v against s, converting the panics raised by the
// validation machinery (InfiniteLoopError, InvalidJSONTypeError) into
// ordinary errors. vloc is the json-pointer location of v from the
// document root.
func (s *Schema) validateValue(v interface{}, vloc string) (err error) {
	defer func() {
		if r := recover(); r != nil {
			switch r := r.(type) {
			case InfiniteLoopError, InvalidJSONTypeError:
				err = r.(error)
			default:
				// unrelated panic: propagate
				panic(r)
			}
		}
	}()
	if _, err := s.validate(nil, 0, "", v, vloc); err != nil {
		// wrap the error tree in a root ValidationError
		ve := ValidationError{
			KeywordLocation:         "",
			AbsoluteKeywordLocation: s.Location,
			InstanceLocation:        vloc,
			Message:                 fmt.Sprintf("doesn't validate with %s", s.Location),
		}
		return ve.causes(err)
	}
	return nil
}
// validate validates given value v with this schema.
//
// scope is the dynamic scope (stack of schemas currently being applied),
// used for $recursiveRef/$dynamicRef resolution and loop detection.
// vscope is the number of trailing scope entries that validate this same
// value v (in-place applicators). spath is the keyword path of s relative
// to its parent; vloc is the json-pointer location of v from the document
// root. The returned validationResult records which properties/items
// remain unevaluated, for unevaluatedProperties/unevaluatedItems.
func (s *Schema) validate(scope []schemaRef, vscope int, spath string, v interface{}, vloc string) (result validationResult, err error) {
	// validationError constructs an error located at the given keyword path.
	validationError := func(keywordPath string, format string, a ...interface{}) *ValidationError {
		return &ValidationError{
			KeywordLocation:         keywordLocation(scope, keywordPath),
			AbsoluteKeywordLocation: joinPtr(s.Location, keywordPath),
			InstanceLocation:        vloc,
			Message:                 fmt.Sprintf(format, a...),
		}
	}

	// loop detection: only the trailing entries validating this same value matter
	sref := schemaRef{spath, s, false}
	if err := checkLoop(scope[len(scope)-vscope:], sref); err != nil {
		panic(err)
	}
	scope = append(scope, sref)
	vscope++

	// populate result
	switch v := v.(type) {
	case map[string]interface{}:
		result.unevalProps = make(map[string]struct{})
		for pname := range v {
			result.unevalProps[pname] = struct{}{}
		}
	case []interface{}:
		result.unevalItems = make(map[int]struct{})
		for i := range v {
			result.unevalItems[i] = struct{}{}
		}
	}

	// validate applies sch to the child value at vpath
	validate := func(sch *Schema, schPath string, v interface{}, vpath string) error {
		vloc := vloc
		if vpath != "" {
			vloc += "/" + vpath
		}
		_, err := sch.validate(scope, 0, schPath, v, vloc)
		return err
	}

	// validateInplace applies sch to this same value v, and on success
	// merges sch's evaluation results into result
	validateInplace := func(sch *Schema, schPath string) error {
		vr, err := sch.validate(scope, vscope, schPath, v, vloc)
		if err == nil {
			// update result
			for pname := range result.unevalProps {
				if _, ok := vr.unevalProps[pname]; !ok {
					delete(result.unevalProps, pname)
				}
			}
			for i := range result.unevalItems {
				if _, ok := vr.unevalItems[i]; !ok {
					delete(result.unevalItems, i)
				}
			}
		}
		return err
	}

	// boolean schema (true/false)
	if s.Always != nil {
		if !*s.Always {
			return result, validationError("", "not allowed")
		}
		return result, nil
	}

	// type: a mismatch short-circuits all remaining validations
	if len(s.Types) > 0 {
		vType := jsonType(v)
		matched := false
		for _, t := range s.Types {
			if vType == t {
				matched = true
				break
			} else if t == "integer" && vType == "number" {
				// a number with zero fraction counts as an integer
				num, _ := new(big.Rat).SetString(fmt.Sprint(v))
				if num.IsInt() {
					matched = true
					break
				}
			}
		}
		if !matched {
			return result, validationError("type", "expected %s, but got %s", strings.Join(s.Types, " or "), vType)
		}
	}

	var errors []error

	if len(s.Constant) > 0 {
		if !equals(v, s.Constant[0]) {
			switch jsonType(s.Constant[0]) {
			case "object", "array":
				errors = append(errors, validationError("const", "const failed"))
			default:
				errors = append(errors, validationError("const", "value must be %#v", s.Constant[0]))
			}
		}
	}

	if len(s.Enum) > 0 {
		matched := false
		for _, item := range s.Enum {
			if equals(v, item) {
				matched = true
				break
			}
		}
		if !matched {
			errors = append(errors, validationError("enum", s.enumError))
		}
	}

	if s.format != nil && !s.format(v) {
		var val = v
		if v, ok := v.(string); ok {
			val = quote(v)
		}
		errors = append(errors, validationError("format", "%v is not valid %s", val, quote(s.Format)))
	}

	switch v := v.(type) {
	case map[string]interface{}:
		// object validations
		if s.MinProperties != -1 && len(v) < s.MinProperties {
			errors = append(errors, validationError("minProperties", "minimum %d properties allowed, but found %d properties", s.MinProperties, len(v)))
		}
		if s.MaxProperties != -1 && len(v) > s.MaxProperties {
			errors = append(errors, validationError("maxProperties", "maximum %d properties allowed, but found %d properties", s.MaxProperties, len(v)))
		}
		if len(s.Required) > 0 {
			var missing []string
			for _, pname := range s.Required {
				if _, ok := v[pname]; !ok {
					missing = append(missing, quote(pname))
				}
			}
			if len(missing) > 0 {
				errors = append(errors, validationError("required", "missing properties: %s", strings.Join(missing, ", ")))
			}
		}

		for pname, sch := range s.Properties {
			if pvalue, ok := v[pname]; ok {
				delete(result.unevalProps, pname)
				if err := validate(sch, "properties/"+escape(pname), pvalue, escape(pname)); err != nil {
					errors = append(errors, err)
				}
			}
		}

		if s.PropertyNames != nil {
			for pname := range v {
				if err := validate(s.PropertyNames, "propertyNames", pname, escape(pname)); err != nil {
					errors = append(errors, err)
				}
			}
		}

		if s.RegexProperties {
			for pname := range v {
				if !isRegex(pname) {
					errors = append(errors, validationError("", "patternProperty %s is not valid regex", quote(pname)))
				}
			}
		}
		for pattern, sch := range s.PatternProperties {
			for pname, pvalue := range v {
				if pattern.MatchString(pname) {
					delete(result.unevalProps, pname)
					if err := validate(sch, "patternProperties/"+escape(pattern.String()), pvalue, escape(pname)); err != nil {
						errors = append(errors, err)
					}
				}
			}
		}
		if s.AdditionalProperties != nil {
			if allowed, ok := s.AdditionalProperties.(bool); ok {
				if !allowed && len(result.unevalProps) > 0 {
					errors = append(errors, validationError("additionalProperties", "additionalProperties %s not allowed", result.unevalPnames()))
				}
			} else {
				schema := s.AdditionalProperties.(*Schema)
				for pname := range result.unevalProps {
					if pvalue, ok := v[pname]; ok {
						if err := validate(schema, "additionalProperties", pvalue, escape(pname)); err != nil {
							errors = append(errors, err)
						}
					}
				}
			}
			// additionalProperties evaluates everything that remained
			result.unevalProps = nil
		}
		for dname, dvalue := range s.Dependencies {
			if _, ok := v[dname]; ok {
				switch dvalue := dvalue.(type) {
				case *Schema:
					if err := validateInplace(dvalue, "dependencies/"+escape(dname)); err != nil {
						errors = append(errors, err)
					}
				case []string:
					for i, pname := range dvalue {
						if _, ok := v[pname]; !ok {
							errors = append(errors, validationError("dependencies/"+escape(dname)+"/"+strconv.Itoa(i), "property %s is required, if %s property exists", quote(pname), quote(dname)))
						}
					}
				}
			}
		}
		for dname, dvalue := range s.DependentRequired {
			if _, ok := v[dname]; ok {
				for i, pname := range dvalue {
					if _, ok := v[pname]; !ok {
						errors = append(errors, validationError("dependentRequired/"+escape(dname)+"/"+strconv.Itoa(i), "property %s is required, if %s property exists", quote(pname), quote(dname)))
					}
				}
			}
		}
		for dname, sch := range s.DependentSchemas {
			if _, ok := v[dname]; ok {
				if err := validateInplace(sch, "dependentSchemas/"+escape(dname)); err != nil {
					errors = append(errors, err)
				}
			}
		}

	case []interface{}:
		// array validations
		if s.MinItems != -1 && len(v) < s.MinItems {
			errors = append(errors, validationError("minItems", "minimum %d items required, but found %d items", s.MinItems, len(v)))
		}
		if s.MaxItems != -1 && len(v) > s.MaxItems {
			errors = append(errors, validationError("maxItems", "maximum %d items required, but found %d items", s.MaxItems, len(v)))
		}
		if s.UniqueItems {
			// NOTE(review): O(n^2) pairwise comparison of json values
			for i := 1; i < len(v); i++ {
				for j := 0; j < i; j++ {
					if equals(v[i], v[j]) {
						errors = append(errors, validationError("uniqueItems", "items at index %d and %d are equal", j, i))
					}
				}
			}
		}

		// items + additionalItems
		switch items := s.Items.(type) {
		case *Schema:
			for i, item := range v {
				if err := validate(items, "items", item, strconv.Itoa(i)); err != nil {
					errors = append(errors, err)
				}
			}
			result.unevalItems = nil
		case []*Schema:
			for i, item := range v {
				if i < len(items) {
					delete(result.unevalItems, i)
					if err := validate(items[i], "items/"+strconv.Itoa(i), item, strconv.Itoa(i)); err != nil {
						errors = append(errors, err)
					}
				} else if sch, ok := s.AdditionalItems.(*Schema); ok {
					delete(result.unevalItems, i)
					if err := validate(sch, "additionalItems", item, strconv.Itoa(i)); err != nil {
						errors = append(errors, err)
					}
				} else {
					break
				}
			}
			if additionalItems, ok := s.AdditionalItems.(bool); ok {
				if additionalItems {
					result.unevalItems = nil
				} else if len(v) > len(items) {
					errors = append(errors, validationError("additionalItems", "only %d items are allowed, but found %d items", len(items), len(v)))
				}
			}
		}

		// prefixItems + items
		for i, item := range v {
			if i < len(s.PrefixItems) {
				delete(result.unevalItems, i)
				if err := validate(s.PrefixItems[i], "prefixItems/"+strconv.Itoa(i), item, strconv.Itoa(i)); err != nil {
					errors = append(errors, err)
				}
			} else if s.Items2020 != nil {
				delete(result.unevalItems, i)
				if err := validate(s.Items2020, "items", item, strconv.Itoa(i)); err != nil {
					errors = append(errors, err)
				}
			} else {
				break
			}
		}

		// contains + minContains + maxContains
		if s.Contains != nil && (s.MinContains != -1 || s.MaxContains != -1) {
			matched := 0
			var causes []error
			for i, item := range v {
				if err := validate(s.Contains, "contains", item, strconv.Itoa(i)); err != nil {
					causes = append(causes, err)
				} else {
					matched++
					if s.ContainsEval {
						delete(result.unevalItems, i)
					}
				}
			}
			if s.MinContains != -1 && matched < s.MinContains {
				errors = append(errors, validationError("minContains", "valid must be >= %d, but got %d", s.MinContains, matched).add(causes...))
			}
			if s.MaxContains != -1 && matched > s.MaxContains {
				errors = append(errors, validationError("maxContains", "valid must be <= %d, but got %d", s.MaxContains, matched))
			}
		}

	case string:
		// minLength + maxLength
		if s.MinLength != -1 || s.MaxLength != -1 {
			// length is measured in unicode code points, not bytes
			length := utf8.RuneCount([]byte(v))
			if s.MinLength != -1 && length < s.MinLength {
				errors = append(errors, validationError("minLength", "length must be >= %d, but got %d", s.MinLength, length))
			}
			if s.MaxLength != -1 && length > s.MaxLength {
				errors = append(errors, validationError("maxLength", "length must be <= %d, but got %d", s.MaxLength, length))
			}
		}

		if s.Pattern != nil && !s.Pattern.MatchString(v) {
			errors = append(errors, validationError("pattern", "does not match pattern %s", quote(s.Pattern.String())))
		}

		// contentEncoding + contentMediaType
		if s.decoder != nil || s.mediaType != nil {
			decoded := s.ContentEncoding == ""
			var content []byte
			if s.decoder != nil {
				b, err := s.decoder(v)
				if err != nil {
					errors = append(errors, validationError("contentEncoding", "value is not %s encoded", s.ContentEncoding))
				} else {
					content, decoded = b, true
				}
			}
			if decoded && s.mediaType != nil {
				if s.decoder == nil {
					content = []byte(v)
				}
				if err := s.mediaType(content); err != nil {
					errors = append(errors, validationError("contentMediaType", "value is not of mediatype %s", quote(s.ContentMediaType)))
				}
			}
			if decoded && s.ContentSchema != nil {
				contentJSON, err := unmarshal(bytes.NewReader(content))
				if err != nil {
					errors = append(errors, validationError("contentSchema", "value is not valid json"))
				} else {
					err := validate(s.ContentSchema, "contentSchema", contentJSON, "")
					if err != nil {
						errors = append(errors, err)
					}
				}
			}
		}

	case json.Number, float32, float64, int, int8, int32, int64, uint, uint8, uint32, uint64:
		// NOTE(review): int16/uint16 are absent from this case list (and from
		// jsonType's) — such values would panic in jsonType; confirm intended.
		// lazy convert to *big.Rat to avoid allocation
		var numVal *big.Rat
		num := func() *big.Rat {
			if numVal == nil {
				numVal, _ = new(big.Rat).SetString(fmt.Sprint(v))
			}
			return numVal
		}
		f64 := func(r *big.Rat) float64 {
			f, _ := r.Float64()
			return f
		}
		if s.Minimum != nil && num().Cmp(s.Minimum) < 0 {
			errors = append(errors, validationError("minimum", "must be >= %v but found %v", f64(s.Minimum), v))
		}
		if s.ExclusiveMinimum != nil && num().Cmp(s.ExclusiveMinimum) <= 0 {
			errors = append(errors, validationError("exclusiveMinimum", "must be > %v but found %v", f64(s.ExclusiveMinimum), v))
		}
		if s.Maximum != nil && num().Cmp(s.Maximum) > 0 {
			errors = append(errors, validationError("maximum", "must be <= %v but found %v", f64(s.Maximum), v))
		}
		if s.ExclusiveMaximum != nil && num().Cmp(s.ExclusiveMaximum) >= 0 {
			errors = append(errors, validationError("exclusiveMaximum", "must be < %v but found %v", f64(s.ExclusiveMaximum), v))
		}
		if s.MultipleOf != nil {
			if q := new(big.Rat).Quo(num(), s.MultipleOf); !q.IsInt() {
				errors = append(errors, validationError("multipleOf", "%v not multipleOf %v", v, f64(s.MultipleOf)))
			}
		}
	}

	// $ref + $recursiveRef + $dynamicRef
	validateRef := func(sch *Schema, refPath string) error {
		if sch != nil {
			if err := validateInplace(sch, refPath); err != nil {
				var url = sch.Location
				if s.url() == sch.url() {
					// same document: report the shorter, relative location
					url = sch.loc()
				}
				return validationError(refPath, "doesn't validate with %s", quote(url)).causes(err)
			}
		}
		return nil
	}
	if err := validateRef(s.Ref, "$ref"); err != nil {
		errors = append(errors, err)
	}
	if s.RecursiveRef != nil {
		sch := s.RecursiveRef
		if sch.RecursiveAnchor {
			// recursiveRef based on scope: outermost $recursiveAnchor wins
			for _, e := range scope {
				if e.schema.RecursiveAnchor {
					sch = e.schema
					break
				}
			}
		}
		if err := validateRef(sch, "$recursiveRef"); err != nil {
			errors = append(errors, err)
		}
	}
	if s.DynamicRef != nil {
		sch := s.DynamicRef
		if sch.DynamicAnchor != "" {
			// dynamicRef based on scope
			for i := len(scope) - 1; i >= 0; i-- {
				sr := scope[i]
				if sr.discard {
					break
				}
				for _, da := range sr.schema.dynamicAnchors {
					if da.DynamicAnchor == s.DynamicRef.DynamicAnchor && da != s.DynamicRef {
						sch = da
						break
					}
				}
			}
		}
		if err := validateRef(sch, "$dynamicRef"); err != nil {
			errors = append(errors, err)
		}
	}

	if s.Not != nil && validateInplace(s.Not, "not") == nil {
		errors = append(errors, validationError("not", "not failed"))
	}

	for i, sch := range s.AllOf {
		schPath := "allOf/" + strconv.Itoa(i)
		if err := validateInplace(sch, schPath); err != nil {
			errors = append(errors, validationError(schPath, "allOf failed").add(err))
		}
	}

	if len(s.AnyOf) > 0 {
		// all branches are evaluated (even after a match), so that their
		// evaluation results are merged for unevaluated* keywords
		matched := false
		var causes []error
		for i, sch := range s.AnyOf {
			if err := validateInplace(sch, "anyOf/"+strconv.Itoa(i)); err == nil {
				matched = true
			} else {
				causes = append(causes, err)
			}
		}
		if !matched {
			errors = append(errors, validationError("anyOf", "anyOf failed").add(causes...))
		}
	}

	if len(s.OneOf) > 0 {
		matched := -1
		var causes []error
		for i, sch := range s.OneOf {
			if err := validateInplace(sch, "oneOf/"+strconv.Itoa(i)); err == nil {
				if matched == -1 {
					matched = i
				} else {
					errors = append(errors, validationError("oneOf", "valid against schemas at indexes %d and %d", matched, i))
					break
				}
			} else {
				causes = append(causes, err)
			}
		}
		if matched == -1 {
			errors = append(errors, validationError("oneOf", "oneOf failed").add(causes...))
		}
	}

	// if + then + else
	if s.If != nil {
		err := validateInplace(s.If, "if")
		// "if" leaves dynamic scope
		scope[len(scope)-1].discard = true
		if err == nil {
			if s.Then != nil {
				if err := validateInplace(s.Then, "then"); err != nil {
					errors = append(errors, validationError("then", "if-then failed").add(err))
				}
			}
		} else {
			if s.Else != nil {
				if err := validateInplace(s.Else, "else"); err != nil {
					errors = append(errors, validationError("else", "if-else failed").add(err))
				}
			}
		}
		// restore dynamic scope
		scope[len(scope)-1].discard = false
	}

	for _, ext := range s.Extensions {
		if err := ext.Validate(ValidationContext{result, validate, validateInplace, validationError}, v); err != nil {
			errors = append(errors, err)
		}
	}

	// UnevaluatedProperties + UnevaluatedItems
	// (run last, after all other applicators have marked evaluations)
	switch v := v.(type) {
	case map[string]interface{}:
		if s.UnevaluatedProperties != nil {
			for pname := range result.unevalProps {
				if pvalue, ok := v[pname]; ok {
					if err := validate(s.UnevaluatedProperties, "UnevaluatedProperties", pvalue, escape(pname)); err != nil {
						errors = append(errors, err)
					}
				}
			}
			result.unevalProps = nil
		}
	case []interface{}:
		if s.UnevaluatedItems != nil {
			for i := range result.unevalItems {
				if err := validate(s.UnevaluatedItems, "UnevaluatedItems", v[i], strconv.Itoa(i)); err != nil {
					errors = append(errors, err)
				}
			}
			result.unevalItems = nil
		}
	}

	switch len(errors) {
	case 0:
		return result, nil
	case 1:
		return result, errors[0]
	default:
		return result, validationError("", "").add(errors...) // empty message, used just for wrapping
	}
}
// validationResult records which properties/items of the instance have not
// yet been evaluated by any applicable subschema; consumed by the
// unevaluatedProperties / unevaluatedItems keywords.
type validationResult struct {
	unevalProps map[string]struct{}
	unevalItems map[int]struct{}
}

// unevalPnames returns the unevaluated property names, quoted and
// comma-separated (in map-iteration order).
func (vr validationResult) unevalPnames() string {
	names := make([]string, 0, len(vr.unevalProps))
	for name := range vr.unevalProps {
		names = append(names, quote(name))
	}
	return strings.Join(names, ", ")
}
// jsonType returns the json type of given value v.
//
// It panics (with InvalidJSONTypeError) if the given value is not a valid
// json value.
// NOTE(review): int16/uint16 are not in the numeric case list (matching the
// numeric case in validate) — such values panic here; confirm intended.
func jsonType(v interface{}) string {
	switch v.(type) {
	case nil:
		return "null"
	case bool:
		return "boolean"
	case string:
		return "string"
	case json.Number, float32, float64, int, int8, int32, int64, uint, uint8, uint32, uint64:
		return "number"
	case []interface{}:
		return "array"
	case map[string]interface{}:
		return "object"
	default:
		panic(InvalidJSONTypeError(fmt.Sprintf("%T", v)))
	}
}
// equals tells if given two json values are equal or not.
// Numbers are compared by exact rational value, so 1 and 1.0 are equal.
func equals(v1, v2 interface{}) bool {
	t := jsonType(v1)
	if jsonType(v2) != t {
		return false
	}
	switch t {
	case "array":
		a, b := v1.([]interface{}), v2.([]interface{})
		if len(a) != len(b) {
			return false
		}
		for i := range a {
			if !equals(a[i], b[i]) {
				return false
			}
		}
		return true
	case "object":
		a, b := v1.(map[string]interface{}), v2.(map[string]interface{})
		if len(a) != len(b) {
			return false
		}
		for k, av := range a {
			bv, ok := b[k]
			if !ok || !equals(av, bv) {
				return false
			}
		}
		return true
	case "number":
		n1, _ := new(big.Rat).SetString(fmt.Sprint(v1))
		n2, _ := new(big.Rat).SetString(fmt.Sprint(v2))
		return n1.Cmp(n2) == 0
	default:
		// null, boolean, string: direct comparison suffices
		return v1 == v2
	}
}
// escape converts given token to valid json-pointer token
// (~ -> ~0, / -> ~1), then percent-encodes it for use in a url fragment.
func escape(token string) string {
	replacer := strings.NewReplacer("~", "~0", "/", "~1")
	return url.PathEscape(replacer.Replace(token))
}

View file

@ -1,4 +0,0 @@
[submodule "testdata/JSON-Schema-Test-Suite"]
path = testdata/JSON-Schema-Test-Suite
url = https://github.com/json-schema-org/JSON-Schema-Test-Suite.git
branch = main

View file

@ -1,5 +0,0 @@
linters:
enable:
- nakedret
- errname
- godot

View file

@ -1,7 +0,0 @@
- id: jsonschema-validate
name: Validate JSON against JSON Schema
description: ensure json files follow specified JSON Schema
entry: jv
language: golang
additional_dependencies:
- ./cmd/jv

View file

@ -1,86 +0,0 @@
# jsonschema v6.0.0
[![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://opensource.org/licenses/Apache-2.0)
[![GoDoc](https://godoc.org/github.com/santhosh-tekuri/jsonschema?status.svg)](https://pkg.go.dev/github.com/santhosh-tekuri/jsonschema/v6)
[![Go Report Card](https://goreportcard.com/badge/github.com/santhosh-tekuri/jsonschema/v6)](https://goreportcard.com/report/github.com/santhosh-tekuri/jsonschema/v6)
[![Build Status](https://github.com/santhosh-tekuri/jsonschema/actions/workflows/go.yaml/badge.svg?branch=boon)](https://github.com/santhosh-tekuri/jsonschema/actions/workflows/go.yaml)
[![codecov](https://codecov.io/gh/santhosh-tekuri/jsonschema/branch/boon/graph/badge.svg?token=JMVj1pFT2l)](https://codecov.io/gh/santhosh-tekuri/jsonschema/tree/boon)
see [godoc](https://pkg.go.dev/github.com/santhosh-tekuri/jsonschema/v6) for examples
## Library Features
- [x] pass [JSON-Schema-Test-Suite](https://github.com/json-schema-org/JSON-Schema-Test-Suite) excluding optional(compare with other impls at [bowtie](https://bowtie-json-schema.github.io/bowtie/#))
- [x] [![draft-04](https://img.shields.io/endpoint?url=https://bowtie.report/badges/go-jsonschema/compliance/draft4.json)](https://bowtie.report/#/dialects/draft4)
- [x] [![draft-06](https://img.shields.io/endpoint?url=https://bowtie.report/badges/go-jsonschema/compliance/draft6.json)](https://bowtie.report/#/dialects/draft6)
- [x] [![draft-07](https://img.shields.io/endpoint?url=https://bowtie.report/badges/go-jsonschema/compliance/draft7.json)](https://bowtie.report/#/dialects/draft7)
- [x] [![draft/2019-09](https://img.shields.io/endpoint?url=https://bowtie.report/badges/go-jsonschema/compliance/draft2019-09.json)](https://bowtie.report/#/dialects/draft2019-09)
- [x] [![draft/2020-12](https://img.shields.io/endpoint?url=https://bowtie.report/badges/go-jsonschema/compliance/draft2020-12.json)](https://bowtie.report/#/dialects/draft2020-12)
- [x] detect infinite loop traps
- [x] `$schema` cycle
- [x] validation cycle
- [x] custom `$schema` url
- [x] vocabulary based validation
- [x] custom regex engine
- [x] format assertions
- [x] flag to enable in draft >= 2019-09
- [x] custom format registration
- [x] built-in formats
- [x] regex, uuid
- [x] ipv4, ipv6
- [x] hostname, email
- [x] date, time, date-time, duration
- [x] json-pointer, relative-json-pointer
- [x] uri, uri-reference, uri-template
- [x] iri, iri-reference
- [x] period, semver
- [x] content assertions
- [x] flag to enable in draft >= 7
- [x] contentEncoding
- [x] base64
- [x] custom
- [x] contentMediaType
- [x] application/json
- [x] custom
- [x] contentSchema
- [x] errors
- [x] introspectable
- [x] hierarchy
- [x] alternative display with `#`
- [x] output
- [x] flag
- [x] basic
- [x] detailed
- [x] custom vocabulary
  - enable via `$vocabulary` for draft >= 2019-09
- enable via flag for draft <= 7
- [x] mixed dialect support
## CLI
to install: `go install github.com/santhosh-tekuri/jsonschema/cmd/jv@latest`
```
Usage: jv [OPTIONS] SCHEMA [INSTANCE...]
Options:
-c, --assert-content Enable content assertions with draft >= 7
-f, --assert-format Enable format assertions with draft >= 2019
--cacert pem-file Use the specified pem-file to verify the peer. The file may contain multiple CA certificates
-d, --draft version Draft version used when '$schema' is missing. Valid values 4, 6, 7, 2019, 2020 (default 2020)
-h, --help Print help information
-k, --insecure Use insecure TLS connection
-o, --output format Output format. Valid values simple, alt, flag, basic, detailed (default "simple")
-q, --quiet Do not print errors
-v, --version Print build information
```
- [x] exit code `1` for validation errors, `2` for usage errors
- [x] validate both schema and multiple instances
- [x] support both json and yaml files
- [x] support standard input, use `-`
- [x] quiet mode with parsable output
- [x] http(s) url support
- [x] custom certs for validation, use `--cacert`
- [x] flag to skip certificate verification, use `--insecure`

View file

@ -1,332 +0,0 @@
package jsonschema
import (
"fmt"
"regexp"
"slices"
)
// Compiler compiles json schema into *Schema.
type Compiler struct {
	schemas      map[urlPtr]*Schema    // already-compiled schemas, keyed by url+pointer
	roots        *roots                // loaded schema resources and draft/vocab state
	formats      map[string]*Format    // user-registered formats
	decoders     map[string]*Decoder   // user-registered contentEncoding decoders
	mediaTypes   map[string]*MediaType // user-registered contentMediaType validators
	assertFormat  bool // force format assertions (see AssertFormat)
	assertContent bool // enable content assertions (see AssertContent)
}
// NewCompiler create Compiler Object.
func NewCompiler() *Compiler {
	c := &Compiler{
		schemas:    make(map[urlPtr]*Schema),
		roots:      newRoots(),
		formats:    make(map[string]*Format),
		decoders:   make(map[string]*Decoder),
		mediaTypes: make(map[string]*MediaType),
	}
	// assertions are opt-in; both flags start disabled
	c.assertFormat = false
	c.assertContent = false
	return c
}
// DefaultDraft overrides the draft used to
// compile schemas without `$schema` field.
//
// By default, this library uses the latest
// draft supported.
//
// The use of this option is HIGHLY encouraged
// to ensure continued correct operation of your
// schema. The current default value will not stay
// the same overtime.
func (c *Compiler) DefaultDraft(d *Draft) {
	c.roots.defaultDraft = d
}
// AssertFormat always enables format assertions.
//
// Default Behavior:
// for draft-07: enabled.
// for draft/2019-09: disabled unless metaschema says `format` vocabulary is required.
// for draft/2020-12: disabled unless metaschema says `format-assertion` vocabulary is required.
func (c *Compiler) AssertFormat() {
	// one-way switch: there is no way to turn assertions back off
	c.assertFormat = true
}
// AssertContent enables content assertions.
//
// Content assertions include keywords:
//   - contentEncoding
//   - contentMediaType
//   - contentSchema
//
// Default behavior is always disabled.
func (c *Compiler) AssertContent() {
	// one-way switch: there is no way to turn assertions back off
	c.assertContent = true
}
// RegisterFormat registers custom format.
//
// NOTE:
//   - "regex" format can not be overridden
//   - format assertions are disabled for draft >= 2019-09
//     see [Compiler.AssertFormat]
func (c *Compiler) RegisterFormat(f *Format) {
	if f.Name == "regex" {
		// "regex" is reserved; silently ignore attempts to override it
		return
	}
	c.formats[f.Name] = f
}
// RegisterContentEncoding registers custom contentEncoding.
//
// NOTE: content assertions are disabled by default.
// see [Compiler.AssertContent].
func (c *Compiler) RegisterContentEncoding(d *Decoder) {
	// later registrations with the same name overwrite earlier ones
	c.decoders[d.Name] = d
}
// RegisterContentMediaType registers custom contentMediaType.
//
// NOTE: content assertions are disabled by default.
// see [Compiler.AssertContent].
func (c *Compiler) RegisterContentMediaType(mt *MediaType) {
	// later registrations with the same name overwrite earlier ones
	c.mediaTypes[mt.Name] = mt
}
// RegisterVocabulary registers custom vocabulary.
//
// NOTE:
//   - vocabularies are disabled for draft >= 2019-09
//     see [Compiler.AssertVocabs]
func (c *Compiler) RegisterVocabulary(vocab *Vocabulary) {
	// keyed by the vocabulary's URL; later registrations overwrite
	c.roots.vocabularies[vocab.URL] = vocab
}
// AssertVocabs always enables user-defined vocabularies assertions.
//
// Default Behavior:
// for draft-07: enabled.
// for draft/2019-09: disabled unless metaschema enables a vocabulary.
// for draft/2020-12: disabled unless metaschema enables a vocabulary.
func (c *Compiler) AssertVocabs() {
	// one-way switch: there is no way to turn assertions back off
	c.roots.assertVocabs = true
}
// AddResource adds schema resource which gets used later in reference
// resolution.
//
// The argument url can be file path or url. Any fragment in url is ignored.
// The argument doc must be valid json value.
// Returns *ResourceExistsError when url names a meta-schema or a
// resource already registered under that url.
func (c *Compiler) AddResource(url string, doc any) error {
	uf, err := absolute(url)
	if err != nil {
		return err
	}
	u := string(uf.url)
	// meta-schema urls are reserved; loader.add reports false on duplicates
	if isMeta(u) || !c.roots.loader.add(uf.url, doc) {
		return &ResourceExistsError{u}
	}
	return nil
}
// UseLoader overrides the default [URLLoader] used
// to load schema resources.
func (c *Compiler) UseLoader(loader URLLoader) {
	c.roots.loader.loader = loader
}
// UseRegexpEngine changes the regexp-engine used.
// By default it uses regexp package from go standard
// library.
//
// NOTE: must be called before compiling any schemas.
func (c *Compiler) UseRegexpEngine(engine RegexpEngine) {
	if engine == nil {
		// nil resets to the standard-library engine
		engine = goRegexpCompile
	}
	c.roots.regexpEngine = engine
}
// enqueue returns the *Schema for up, reusing a previously compiled
// or already-queued one; otherwise it creates a fresh schema and
// appends it to the compile queue.
func (c *Compiler) enqueue(q *queue, up urlPtr) *Schema {
	// reuse a previously compiled schema
	if compiled, ok := c.schemas[up]; ok {
		return compiled
	}
	// reuse one already waiting in the queue
	if queued := q.get(up); queued != nil {
		return queued
	}
	// otherwise create and queue a fresh schema
	fresh := newSchema(up)
	q.append(fresh)
	return fresh
}
// MustCompile is like [Compile] but panics if compilation fails.
// It simplifies safe initialization of global variables holding
// compiled schema.
func (c *Compiler) MustCompile(loc string) *Schema {
	sch, err := c.Compile(loc)
	if err != nil {
		panic(fmt.Sprintf("jsonschema: Compile(%q): %v", loc, err))
	}
	return sch
}
// Compile compiles json-schema at given loc.
// loc may be a file path or url, optionally with a fragment;
// the fragment is resolved to a json-pointer before compilation.
func (c *Compiler) Compile(loc string) (*Schema, error) {
	uf, err := absolute(loc)
	if err != nil {
		return nil, err
	}
	up, err := c.roots.resolveFragment(*uf)
	if err != nil {
		return nil, err
	}
	return c.doCompile(up)
}
// doCompile compiles the schema at up together with everything it
// references. Compilation is driven by a work queue: compileValue may
// enqueue further schemas, and the loop runs until the queue is drained.
// Only on full success are the newly compiled schemas published to
// c.schemas, so a failed compile leaves the compiler unchanged.
func (c *Compiler) doCompile(up urlPtr) (*Schema, error) {
	q := &queue{}
	compiled := 0
	c.enqueue(q, up)
	// q may grow while we iterate; `compiled` is the cursor into it
	for q.len() > compiled {
		sch := q.at(compiled)
		if err := c.roots.ensureSubschema(sch.up); err != nil {
			return nil, err
		}
		r := c.roots.roots[sch.up.url]
		v, err := sch.up.lookup(r.doc)
		if err != nil {
			return nil, err
		}
		if err := c.compileValue(v, sch, r, q); err != nil {
			return nil, err
		}
		compiled++
	}
	// publish everything at once, only after all compiles succeeded
	for _, sch := range *q {
		c.schemas[sch.up] = sch
	}
	return c.schemas[up], nil
}
// compileValue compiles the json value v (a bool or an object schema)
// into sch. It records the draft version, links sch to its enclosing
// resource (enqueuing it if needed), and — for draft 2020-12 resource
// roots — enqueues the targets of dynamic anchors. Finally it caches
// which properties/items are statically known to be evaluated, for use
// by unevaluatedProperties/unevaluatedItems.
func (c *Compiler) compileValue(v any, sch *Schema, r *root, q *queue) error {
	res := r.resource(sch.up.ptr)
	sch.DraftVersion = res.dialect.draft.version
	base := urlPtr{sch.up.url, res.ptr}
	sch.resource = c.enqueue(q, base)
	// if resource, enqueue dynamic anchors for compilation
	if sch.DraftVersion >= 2020 && sch.up == sch.resource.up {
		res := r.resource(sch.up.ptr)
		for anchor, anchorPtr := range res.anchors {
			if slices.Contains(res.dynamicAnchors, anchor) {
				up := urlPtr{sch.up.url, anchorPtr}
				danchorSch := c.enqueue(q, up)
				if sch.dynamicAnchors == nil {
					sch.dynamicAnchors = map[string]*Schema{}
				}
				sch.dynamicAnchors[string(anchor)] = danchorSch
			}
		}
	}
	switch v := v.(type) {
	case bool:
		// boolean schema: true admits everything, false nothing
		sch.Bool = &v
	case map[string]any:
		if err := c.compileObject(v, sch, r, q); err != nil {
			return err
		}
	}
	// a schema with additionalProperties evaluates every property
	sch.allPropsEvaluated = sch.AdditionalProperties != nil
	if sch.DraftVersion < 2020 {
		// pre-2020: `items` may be a single schema or a tuple
		sch.allItemsEvaluated = sch.AdditionalItems != nil
		switch items := sch.Items.(type) {
		case *Schema:
			sch.allItemsEvaluated = true
		case []*Schema:
			sch.numItemsEvaluated = len(items)
		}
	} else {
		// 2020-12: `items` applies to the rest after `prefixItems`
		sch.allItemsEvaluated = sch.Items2020 != nil
		sch.numItemsEvaluated = len(sch.PrefixItems)
	}
	return nil
}
// compileObject compiles an object-form schema into sch.
// An empty object is equivalent to the boolean schema `true`
// and is short-circuited as such; otherwise the keyword-by-keyword
// work is delegated to objCompiler.
func (c *Compiler) compileObject(obj map[string]any, sch *Schema, r *root, q *queue) error {
	if len(obj) == 0 {
		b := true
		sch.Bool = &b
		return nil
	}
	oc := objCompiler{
		c:   c,
		obj: obj,
		up:  sch.up,
		r:   r,
		res: r.resource(sch.up.ptr),
		q:   q,
	}
	return oc.compile(sch)
}
// queue --

// queue is the ordered list of schemas pending compilation in doCompile.
type queue []*Schema

// append adds sch to the end of the queue.
func (q *queue) append(sch *Schema) {
	*q = append(*q, sch)
}

// at returns the i-th queued schema.
func (q *queue) at(i int) *Schema {
	return (*q)[i]
}

// len returns the number of queued schemas.
func (q *queue) len() int {
	return len(*q)
}

// get returns the queued schema with the given url+pointer,
// or nil if none is queued. Linear scan; queues stay small.
func (q *queue) get(up urlPtr) *Schema {
	i := slices.IndexFunc(*q, func(sch *Schema) bool { return sch.up == up })
	if i != -1 {
		return (*q)[i]
	}
	return nil
}
// regexp --

// Regexp is the representation of compiled regular expression.
type Regexp interface {
	fmt.Stringer
	// MatchString reports whether the string s contains
	// any match of the regular expression.
	MatchString(string) bool
}

// RegexpEngine parses a regular expression and returns,
// if successful, a Regexp object that can be used to
// match against text.
type RegexpEngine func(string) (Regexp, error)
// validate reports whether v, when a string, is a valid regular
// expression for this engine. Non-string values pass trivially.
func (re RegexpEngine) validate(v any) error {
	if s, ok := v.(string); ok {
		_, err := re(s)
		return err
	}
	return nil
}
// goRegexpCompile is the default RegexpEngine, backed by the
// standard-library regexp package (RE2 syntax).
func goRegexpCompile(s string) (Regexp, error) {
	return regexp.Compile(s)
}

View file

@ -1,51 +0,0 @@
package jsonschema
import (
"bytes"
"encoding/base64"
"encoding/json"
)
// Decoder specifies how to decode specific contentEncoding.
type Decoder struct {
	// Name of contentEncoding.
	Name string

	// Decode given string to byte array.
	Decode func(string) ([]byte, error)
}
// decoders holds the built-in contentEncoding decoders.
// Only "base64" (standard encoding with padding) ships by default;
// others can be added via Compiler.RegisterContentEncoding.
var decoders = map[string]*Decoder{
	"base64": {
		Name: "base64",
		Decode: func(s string) ([]byte, error) {
			return base64.StdEncoding.DecodeString(s)
		},
	},
}
// MediaType specifies how to validate bytes against specific contentMediaType.
type MediaType struct {
	// Name of contentMediaType.
	Name string

	// Validate checks whether bytes conform to this mediatype.
	Validate func([]byte) error

	// UnmarshalJSON unmarshals bytes into json value.
	// This must be nil if this mediatype is not compatible
	// with json.
	UnmarshalJSON func([]byte) (any, error)
}
// mediaTypes holds the built-in contentMediaType validators.
// Only "application/json" ships by default; others can be added
// via Compiler.RegisterContentMediaType.
var mediaTypes = map[string]*MediaType{
	"application/json": {
		Name: "application/json",
		Validate: func(b []byte) error {
			var v any
			return json.Unmarshal(b, &v)
		},
		UnmarshalJSON: func(b []byte) (any, error) {
			return UnmarshalJSON(bytes.NewReader(b))
		},
	},
}

View file

@ -1,360 +0,0 @@
package jsonschema
import (
"fmt"
"slices"
"strings"
)
// A Draft represents json-schema specification.
type Draft struct {
	version       int                // numeric draft version: 4, 6, 7, 2019, 2020
	url           string             // canonical meta-schema url
	sch           *Schema            // compiled meta-schema (populated in init)
	id            string             // property name used to represent id
	subschemas    []SchemaPath       // locations of subschemas
	vocabPrefix   string             // prefix used for vocabulary
	allVocabs     map[string]*Schema // names of supported vocabs with its schemas
	defaultVocabs []string           // names of default vocabs
}
// String returns the specification url.
func (d *Draft) String() string {
	return d.url
}
// The supported drafts. Each later draft reuses the earlier draft's
// subschema locations and adds its own; vocabularies exist only for
// 2019-09 and later. The *Schema fields (sch, allVocabs values) are
// populated by init below.
var (
	Draft4 = &Draft{
		version: 4,
		url:     "http://json-schema.org/draft-04/schema",
		id:      "id",
		subschemas: []SchemaPath{
			// type agnostic
			schemaPath("definitions/*"),
			schemaPath("not"),
			schemaPath("allOf/[]"),
			schemaPath("anyOf/[]"),
			schemaPath("oneOf/[]"),
			// object
			schemaPath("properties/*"),
			schemaPath("additionalProperties"),
			schemaPath("patternProperties/*"),
			// array
			schemaPath("items"),
			schemaPath("items/[]"),
			schemaPath("additionalItems"),
			schemaPath("dependencies/*"),
		},
		vocabPrefix:   "",
		allVocabs:     map[string]*Schema{},
		defaultVocabs: []string{},
	}

	Draft6 = &Draft{
		version: 6,
		url:     "http://json-schema.org/draft-06/schema",
		id:      "$id",
		subschemas: joinSubschemas(Draft4.subschemas,
			schemaPath("propertyNames"),
			schemaPath("contains"),
		),
		vocabPrefix:   "",
		allVocabs:     map[string]*Schema{},
		defaultVocabs: []string{},
	}

	Draft7 = &Draft{
		version: 7,
		url:     "http://json-schema.org/draft-07/schema",
		id:      "$id",
		subschemas: joinSubschemas(Draft6.subschemas,
			schemaPath("if"),
			schemaPath("then"),
			schemaPath("else"),
		),
		vocabPrefix:   "",
		allVocabs:     map[string]*Schema{},
		defaultVocabs: []string{},
	}

	Draft2019 = &Draft{
		version: 2019,
		url:     "https://json-schema.org/draft/2019-09/schema",
		id:      "$id",
		subschemas: joinSubschemas(Draft7.subschemas,
			schemaPath("$defs/*"),
			schemaPath("dependentSchemas/*"),
			schemaPath("unevaluatedProperties"),
			schemaPath("unevaluatedItems"),
			schemaPath("contentSchema"),
		),
		vocabPrefix: "https://json-schema.org/draft/2019-09/vocab/",
		allVocabs: map[string]*Schema{
			"core":       nil,
			"applicator": nil,
			"validation": nil,
			"meta-data":  nil,
			"format":     nil,
			"content":    nil,
		},
		defaultVocabs: []string{"core", "applicator", "validation"},
	}

	Draft2020 = &Draft{
		version: 2020,
		url:     "https://json-schema.org/draft/2020-12/schema",
		id:      "$id",
		subschemas: joinSubschemas(Draft2019.subschemas,
			schemaPath("prefixItems/[]"),
		),
		vocabPrefix: "https://json-schema.org/draft/2020-12/vocab/",
		allVocabs: map[string]*Schema{
			"core":              nil,
			"applicator":        nil,
			"unevaluated":       nil,
			"validation":        nil,
			"meta-data":         nil,
			"format-annotation": nil,
			"format-assertion":  nil,
			"content":           nil,
		},
		defaultVocabs: []string{"core", "applicator", "unevaluated", "validation"},
	}

	// draftLatest is the draft used when `$schema` is absent
	// (unless overridden via Compiler.DefaultDraft).
	draftLatest = Draft2020
)
// init compiles the meta-schema and vocabulary schemas for every
// supported draft. These self-hosted compiles must succeed, hence
// MustCompile.
func init() {
	c := NewCompiler()
	c.AssertFormat()
	for _, d := range []*Draft{Draft4, Draft6, Draft7, Draft2019, Draft2020} {
		d.sch = c.MustCompile(d.url)
		for name := range d.allVocabs {
			// e.g. ".../2019-09/schema" -> ".../2019-09/meta/core"
			d.allVocabs[name] = c.MustCompile(strings.TrimSuffix(d.url, "schema") + "meta/" + name)
		}
	}
}
// draftFromURL returns the *Draft for a known meta-schema url,
// accepting both http and https schemes. A url with a fragment,
// or an unknown url, yields nil.
func draftFromURL(url string) *Draft {
	u, frag := split(url)
	if frag != "" {
		return nil
	}
	// treat http:// and https:// alike
	if rest, ok := strings.CutPrefix(u, "http://"); ok {
		u = rest
	} else if rest, ok := strings.CutPrefix(u, "https://"); ok {
		u = rest
	}
	known := map[string]*Draft{
		"json-schema.org/schema":               draftLatest,
		"json-schema.org/draft/2020-12/schema": Draft2020,
		"json-schema.org/draft/2019-09/schema": Draft2019,
		"json-schema.org/draft-07/schema":      Draft7,
		"json-schema.org/draft-06/schema":      Draft6,
		"json-schema.org/draft-04/schema":      Draft4,
	}
	return known[u] // nil when not a known meta-schema
}
// getID extracts the schema id from obj using this draft's id keyword
// ("id" for draft-04, "$id" later). Any fragment in the id is dropped.
// Returns "" when obj has no usable id.
func (d *Draft) getID(obj map[string]any) string {
	if d.version < 2019 {
		if _, ok := obj["$ref"]; ok {
			// All other properties in a "$ref" object MUST be ignored
			return ""
		}
	}
	id, ok := strVal(obj, d.id)
	if !ok {
		return ""
	}
	id, _ = split(id) // ignore fragment
	return id
}
// getVocabs returns the vocabulary names enabled by the `$vocabulary`
// object of the meta-schema doc. Only entries whose value is `true`
// (required) are considered. Draft-known vocabularies are returned by
// their short name; unknown ones must be registered in vocabularies
// (else UnsupportedVocabularyError) and are returned by full url.
// A nil slice means "no $vocabulary present" (use draft defaults).
// "core" is always included when any vocab list is returned.
func (d *Draft) getVocabs(url url, doc any, vocabularies map[string]*Vocabulary) ([]string, error) {
	if d.version < 2019 {
		// $vocabulary did not exist before 2019-09
		return nil, nil
	}
	obj, ok := doc.(map[string]any)
	if !ok {
		return nil, nil
	}
	v, ok := obj["$vocabulary"]
	if !ok {
		return nil, nil
	}
	obj, ok = v.(map[string]any)
	if !ok {
		return nil, nil
	}

	var vocabs []string
	for vocab, reqd := range obj {
		if reqd, ok := reqd.(bool); !ok || !reqd {
			// optional (false) or malformed entries are skipped
			continue
		}
		name, ok := strings.CutPrefix(vocab, d.vocabPrefix)
		if ok {
			if _, ok := d.allVocabs[name]; ok {
				if !slices.Contains(vocabs, name) {
					vocabs = append(vocabs, name)
					continue
				}
			}
		}
		// not a draft-known vocab: must be user-registered
		if _, ok := vocabularies[vocab]; !ok {
			return nil, &UnsupportedVocabularyError{url.String(), vocab}
		}
		if !slices.Contains(vocabs, vocab) {
			vocabs = append(vocabs, vocab)
		}
	}
	if !slices.Contains(vocabs, "core") {
		vocabs = append(vocabs, "core")
	}
	return vocabs, nil
}
// --

// dialect pairs a draft with the vocabularies its meta-schema enabled.
type dialect struct {
	draft  *Draft
	vocabs []string // nil means use draft.defaultVocabs
}
// hasVocab reports whether the named vocabulary is active for this
// dialect. "core" is always active, as is everything for drafts
// before 2019-09 (which have no vocabulary mechanism).
func (d *dialect) hasVocab(name string) bool {
	if name == "core" || d.draft.version < 2019 {
		return true
	}
	active := d.draft.defaultVocabs
	if d.vocabs != nil {
		active = d.vocabs
	}
	return slices.Contains(active, name)
}
// activeVocabs returns the vocabulary list to use for validation,
// folding user-registered vocabularies in when assertions are on.
// For drafts before 2019-09 user vocabs are always asserted.
// Returns d.vocabs unchanged when nothing needs to be added
// (nil still means "draft defaults").
func (d *dialect) activeVocabs(assertVocabs bool, vocabularies map[string]*Vocabulary) []string {
	if len(vocabularies) == 0 {
		return d.vocabs
	}
	if d.draft.version < 2019 {
		assertVocabs = true
	}
	if !assertVocabs {
		return d.vocabs
	}
	// clone before appending so the dialect/draft slices stay untouched
	var vocabs []string
	if d.vocabs == nil {
		vocabs = slices.Clone(d.draft.defaultVocabs)
	} else {
		vocabs = slices.Clone(d.vocabs)
	}
	for vocab := range vocabularies {
		if !slices.Contains(vocabs, vocab) {
			vocabs = append(vocabs, vocab)
		}
	}
	return vocabs
}
// getSchema returns the meta-schema to validate schemas of this dialect.
// When no vocabularies are active it is the draft's own meta-schema;
// otherwise a synthetic in-memory schema is built as the allOf of every
// active vocabulary schema (draft-known or user-registered), always
// including "core" as a fallback.
func (d *dialect) getSchema(assertVocabs bool, vocabularies map[string]*Vocabulary) *Schema {
	vocabs := d.activeVocabs(assertVocabs, vocabularies)
	if vocabs == nil {
		return d.draft.sch
	}

	var allOf []*Schema
	for _, vocab := range vocabs {
		sch := d.draft.allVocabs[vocab]
		if sch == nil {
			// not draft-known: look up user-registered vocabulary
			if v, ok := vocabularies[vocab]; ok {
				sch = v.Schema
			}
		}
		if sch != nil {
			allOf = append(allOf, sch)
		}
	}
	if !slices.Contains(vocabs, "core") {
		sch := d.draft.allVocabs["core"]
		if sch == nil {
			sch = d.draft.sch
		}
		allOf = append(allOf, sch)
	}
	// synthetic schema; the urn:mem: location never resolves anywhere
	sch := &Schema{
		Location:     "urn:mem:metaschema",
		up:           urlPtr{url("urn:mem:metaschema"), ""},
		DraftVersion: d.draft.version,
		AllOf:        allOf,
	}
	sch.resource = sch
	if sch.DraftVersion >= 2020 {
		// mirror the real 2020-12 meta-schema's $dynamicAnchor: meta
		sch.DynamicAnchor = "meta"
		sch.dynamicAnchors = map[string]*Schema{
			"meta": sch,
		}
	}
	return sch
}
// --

// ParseIDError reports an id value at URL that could not be parsed.
type ParseIDError struct {
	URL string
}

func (e *ParseIDError) Error() string {
	return fmt.Sprintf("error in parsing id at %q", e.URL)
}
// --

// ParseAnchorError reports an anchor value at URL that could not be parsed.
type ParseAnchorError struct {
	URL string
}

func (e *ParseAnchorError) Error() string {
	return fmt.Sprintf("error in parsing anchor at %q", e.URL)
}
// --

// DuplicateIDError reports the same ID declared at two
// json-pointer locations (Ptr1, Ptr2) within one resource.
type DuplicateIDError struct {
	ID   string
	URL  string
	Ptr1 string
	Ptr2 string
}

func (e *DuplicateIDError) Error() string {
	return fmt.Sprintf("duplicate id %q in %q at %q and %q", e.ID, e.URL, e.Ptr1, e.Ptr2)
}
// --

// DuplicateAnchorError reports the same anchor declared at two
// json-pointer locations (Ptr1, Ptr2) within one resource.
type DuplicateAnchorError struct {
	Anchor string
	URL    string
	Ptr1   string
	Ptr2   string
}

func (e *DuplicateAnchorError) Error() string {
	return fmt.Sprintf("duplicate anchor %q in %q at %q and %q", e.Anchor, e.URL, e.Ptr1, e.Ptr2)
}
// --

// joinSubschemas returns a fresh slice holding a1 followed by a2,
// leaving both inputs untouched.
func joinSubschemas(a1 []SchemaPath, a2 ...SchemaPath) []SchemaPath {
	out := make([]SchemaPath, 0, len(a1)+len(a2))
	out = append(out, a1...)
	return append(out, a2...)
}

View file

@ -1,708 +0,0 @@
package jsonschema
import (
"net/netip"
gourl "net/url"
"strconv"
"strings"
"time"
)
// Format defines a specific format.
type Format struct {
	// Name of format.
	Name string

	// Validate checks if given value is of this format.
	// Non-string values are accepted by every built-in validator.
	Validate func(v any) error
}
// formats holds the built-in format validators, keyed by format name.
// Custom formats can be added via Compiler.RegisterFormat.
var formats = map[string]*Format{
	"json-pointer":          {"json-pointer", validateJSONPointer},
	"relative-json-pointer": {"relative-json-pointer", validateRelativeJSONPointer},
	"uuid":                  {"uuid", validateUUID},
	"duration":              {"duration", validateDuration},
	"period":                {"period", validatePeriod},
	"ipv4":                  {"ipv4", validateIPV4},
	"ipv6":                  {"ipv6", validateIPV6},
	"hostname":              {"hostname", validateHostname},
	"email":                 {"email", validateEmail},
	"date":                  {"date", validateDate},
	"time":                  {"time", validateTime},
	"date-time":             {"date-time", validateDateTime},
	"uri":                   {"uri", validateURI},
	"iri":                   {"iri", validateURI},
	"uri-reference":         {"uri-reference", validateURIReference},
	"iri-reference":         {"iri-reference", validateURIReference},
	"uri-template":          {"uri-template", validateURITemplate},
	"semver":                {"semver", validateSemver},
}
// see https://www.rfc-editor.org/rfc/rfc6901#section-3
// validateJSONPointer checks that the value, when a string, is a valid
// RFC 6901 json-pointer: empty, or "/"-prefixed tokens in which "~"
// must be followed by '0' or '1' and characters are limited to the
// unescaped set. Non-string values pass trivially.
func validateJSONPointer(v any) error {
	s, ok := v.(string)
	if !ok {
		return nil
	}
	if s == "" {
		return nil
	}
	if !strings.HasPrefix(s, "/") {
		return LocalizableError("not starting with /")
	}
	for _, tok := range strings.Split(s, "/")[1:] {
		escape := false
		for _, ch := range tok {
			if escape {
				escape = false
				if ch != '0' && ch != '1' {
					return LocalizableError("~ must be followed by 0 or 1")
				}
				continue
			}
			if ch == '~' {
				escape = true
				continue
			}
			switch {
			// unescaped ranges from RFC 6901: %x00-2E / %x30-7D / %x7F-10FFFF
			// (excludes '/' 0x2F and '~' 0x7E, which need escaping)
			case ch >= '\x00' && ch <= '\x2E':
			case ch >= '\x30' && ch <= '\x7D':
			case ch >= '\x7F' && ch <= '\U0010FFFF':
			default:
				return LocalizableError("invalid character %q", ch)
			}
		}
		if escape {
			// token ended with a dangling '~'
			return LocalizableError("~ must be followed by 0 or 1")
		}
	}
	return nil
}
// see https://tools.ietf.org/html/draft-handrews-relative-json-pointer-01#section-3
// validateRelativeJSONPointer checks that the value, when a string, is a
// non-negative integer (no leading zeros) followed by either "#" or a
// json-pointer. Non-string values pass trivially.
func validateRelativeJSONPointer(v any) error {
	s, ok := v.(string)
	if !ok {
		return nil
	}

	// start with non-negative-integer
	numDigits := 0
	for _, ch := range s {
		if ch >= '0' && ch <= '9' {
			numDigits++
		} else {
			break
		}
	}
	if numDigits == 0 {
		return LocalizableError("must start with non-negative integer")
	}
	if numDigits > 1 && strings.HasPrefix(s, "0") {
		return LocalizableError("starts with zero")
	}
	s = s[numDigits:]

	// followed by either json-pointer or '#'
	if s == "#" {
		return nil
	}
	return validateJSONPointer(s)
}
// see https://datatracker.ietf.org/doc/html/rfc4122#page-4
// validateUUID checks the hyphenated 8-4-4-4-12 hex layout of a UUID.
// Non-string values pass trivially.
func validateUUID(v any) error {
	s, ok := v.(string)
	if !ok {
		return nil
	}

	wantLens := []int{8, 4, 4, 4, 12}
	groups := strings.Split(s, "-")
	if len(groups) != len(wantLens) {
		return LocalizableError("must have %d elements", len(wantLens))
	}
	for i, group := range groups {
		if len(group) != wantLens[i] {
			return LocalizableError("element %d must be %d characters long", i+1, wantLens[i])
		}
		for _, ch := range group {
			isHex := (ch >= '0' && ch <= '9') ||
				(ch >= 'a' && ch <= 'f') ||
				(ch >= 'A' && ch <= 'F')
			if !isHex {
				return LocalizableError("non-hex character %q", ch)
			}
		}
	}
	return nil
}
// see https://datatracker.ietf.org/doc/html/rfc3339#appendix-A
// validateDuration checks that the value, when a string, is an ISO-8601
// duration as defined by RFC 3339: "P" followed either by a week form
// (nW) or by date units (Y, M, D) and, after "T", time units (H, M, S),
// each unit appearing at most once and in order. Non-string values pass
// trivially.
func validateDuration(v any) error {
	s, ok := v.(string)
	if !ok {
		return nil
	}

	// must start with 'P'
	s, ok = strings.CutPrefix(s, "P")
	if !ok {
		return LocalizableError("must start with P")
	}
	if s == "" {
		return LocalizableError("nothing after P")
	}

	// dur-week
	if s, ok := strings.CutSuffix(s, "W"); ok {
		if s == "" {
			return LocalizableError("no number in week")
		}
		for _, ch := range s {
			if ch < '0' || ch > '9' {
				return LocalizableError("invalid week")
			}
		}
		return nil
	}

	// date units before 'T', time units after
	allUnits := []string{"YMD", "HMS"}
	for i, s := range strings.Split(s, "T") {
		if i != 0 && s == "" {
			return LocalizableError("no time elements")
		}
		if i >= len(allUnits) {
			return LocalizableError("more than one T")
		}
		units := allUnits[i]
		for s != "" {
			// each element is digits followed by a unit letter
			digitCount := 0
			for _, ch := range s {
				if ch >= '0' && ch <= '9' {
					digitCount++
				} else {
					break
				}
			}
			if digitCount == 0 {
				return LocalizableError("missing number")
			}
			s = s[digitCount:]
			if s == "" {
				return LocalizableError("missing unit")
			}
			unit := s[0]
			j := strings.IndexByte(units, unit)
			if j == -1 {
				// trimming `units` below enforces order, so a valid unit
				// seen out of sequence is reported as out of order
				if strings.IndexByte(allUnits[i], unit) != -1 {
					return LocalizableError("unit %q out of order", unit)
				}
				return LocalizableError("invalid unit %q", unit)
			}
			units = units[j+1:]
			s = s[1:]
		}
	}

	return nil
}
// validateIPV4 checks that the value, when a string, is a dotted-quad
// IPv4 address: exactly four decimals 0-255, no leading zeros.
// Non-string values pass trivially.
// NOTE(review): the Atoi error is returned unwrapped, unlike the
// LocalizableError used elsewhere — confirm whether intentional.
func validateIPV4(v any) error {
	s, ok := v.(string)
	if !ok {
		return nil
	}
	groups := strings.Split(s, ".")
	if len(groups) != 4 {
		return LocalizableError("expected four decimals")
	}
	for _, group := range groups {
		if len(group) > 1 && group[0] == '0' {
			return LocalizableError("leading zeros")
		}
		n, err := strconv.Atoi(group)
		if err != nil {
			return err
		}
		// Atoi also accepts signed input such as "-1", caught here
		if n < 0 || n > 255 {
			return LocalizableError("decimal must be between 0 and 255")
		}
	}
	return nil
}
// validateIPV6 checks that the value, when a string, is an IPv6 address:
// it must contain a colon (rejecting plain IPv4, which netip would also
// accept), parse via netip.ParseAddr, and carry no zone id.
// Non-string values pass trivially.
func validateIPV6(v any) error {
	s, ok := v.(string)
	if !ok {
		return nil
	}
	if !strings.Contains(s, ":") {
		return LocalizableError("missing colon")
	}
	addr, err := netip.ParseAddr(s)
	if err != nil {
		return err
	}
	if addr.Zone() != "" {
		// netip accepts "fe80::1%eth0"; the format does not
		return LocalizableError("zone id is not a part of ipv6 address")
	}
	return nil
}
// see https://en.wikipedia.org/wiki/Hostname#Restrictions_on_valid_host_names
// validateHostname checks that the value, when a string, is a valid
// hostname: at most 253 characters (ignoring one trailing dot), made of
// dot-separated labels of 1-63 ASCII letters/digits/hyphens that neither
// start nor end with a hyphen. Non-string values pass trivially.
func validateHostname(v any) error {
	s, ok := v.(string)
	if !ok {
		return nil
	}

	// entire hostname (including the delimiting dots but not a trailing dot) has a maximum of 253 ASCII characters
	s = strings.TrimSuffix(s, ".")
	if len(s) > 253 {
		return LocalizableError("more than 253 characters long")
	}

	// Hostnames are composed of series of labels concatenated with dots, as are all domain names
	for _, label := range strings.Split(s, ".") {
		// Each label must be from 1 to 63 characters long
		if len(label) < 1 || len(label) > 63 {
			return LocalizableError("label must be 1 to 63 characters long")
		}

		// labels must not start or end with a hyphen
		if strings.HasPrefix(label, "-") {
			return LocalizableError("label starts with hyphen")
		}
		if strings.HasSuffix(label, "-") {
			return LocalizableError("label ends with hyphen")
		}

		// labels may contain only the ASCII letters 'a' through 'z' (in a case-insensitive manner),
		// the digits '0' through '9', and the hyphen ('-')
		for _, ch := range label {
			switch {
			case ch >= 'a' && ch <= 'z':
			case ch >= 'A' && ch <= 'Z':
			case ch >= '0' && ch <= '9':
			case ch == '-':
			default:
				return LocalizableError("invalid character %q", ch)
			}
		}
	}
	return nil
}
// see https://en.wikipedia.org/wiki/Email_address
// validateEmail checks that the value, when a string, satisfies the
// common restrictions on email addresses: at most 254 characters total,
// a local part of at most 64 characters (quoted or unquoted), and a
// domain that is either a valid hostname or a bracketed IPv4/IPv6
// address. Non-string values pass trivially.
func validateEmail(v any) error {
	s, ok := v.(string)
	if !ok {
		return nil
	}
	// entire email address to be no more than 254 characters long
	if len(s) > 254 {
		return LocalizableError("more than 254 characters long")
	}

	// email address is generally recognized as having two parts joined with an at-sign
	at := strings.LastIndexByte(s, '@')
	if at == -1 {
		return LocalizableError("missing @")
	}
	local, domain := s[:at], s[at+1:]

	// local part may be up to 64 characters long
	if len(local) > 64 {
		return LocalizableError("local part more than 64 characters long")
	}

	// fix: the original tested HasPrefix twice, so an unterminated
	// quoted form like `"abc` was treated as quoted and its last
	// character silently dropped; a closing quote is now required.
	if len(local) > 1 && strings.HasPrefix(local, `"`) && strings.HasSuffix(local, `"`) {
		// quoted
		local := local[1 : len(local)-1]
		if strings.IndexByte(local, '\\') != -1 || strings.IndexByte(local, '"') != -1 {
			return LocalizableError("backslash and quote are not allowed within quoted local part")
		}
	} else {
		// unquoted
		if strings.HasPrefix(local, ".") {
			return LocalizableError("starts with dot")
		}
		if strings.HasSuffix(local, ".") {
			return LocalizableError("ends with dot")
		}

		// consecutive dots not allowed
		if strings.Contains(local, "..") {
			return LocalizableError("consecutive dots")
		}

		// check allowed chars
		for _, ch := range local {
			switch {
			case ch >= 'a' && ch <= 'z':
			case ch >= 'A' && ch <= 'Z':
			case ch >= '0' && ch <= '9':
			case strings.ContainsRune(".!#$%&'*+-/=?^_`{|}~", ch):
			default:
				return LocalizableError("invalid character %q", ch)
			}
		}
	}

	// domain if enclosed in brackets, must match an IP address
	if strings.HasPrefix(domain, "[") && strings.HasSuffix(domain, "]") {
		domain = domain[1 : len(domain)-1]
		if rem, ok := strings.CutPrefix(domain, "IPv6:"); ok {
			if err := validateIPV6(rem); err != nil {
				return LocalizableError("invalid ipv6 address: %v", err)
			}
			return nil
		}
		if err := validateIPV4(domain); err != nil {
			return LocalizableError("invalid ipv4 address: %v", err)
		}
		return nil
	}

	// domain must match the requirements for a hostname
	if err := validateHostname(domain); err != nil {
		return LocalizableError("invalid domain: %v", err)
	}

	return nil
}
// see https://datatracker.ietf.org/doc/html/rfc3339#section-5.6
// validateDate checks that the value, when a string, is an RFC 3339
// full-date (yyyy-mm-dd, calendar-valid). Non-string values pass
// trivially.
func validateDate(v any) error {
	str, isStr := v.(string)
	if !isStr {
		return nil
	}
	_, parseErr := time.Parse("2006-01-02", str)
	return parseErr
}
// see https://datatracker.ietf.org/doc/html/rfc3339#section-5.6
// NOTE: golang time package does not support leap seconds.
// validateTime checks that the value, when a string, is an RFC 3339
// full-time: hh:mm:ss, optional fractional seconds, then "Z"/"z" or a
// numeric offset. A leap second (ss == 60) is only accepted when the
// time, normalized to UTC, is 23:59:60. Non-string values pass trivially.
func validateTime(v any) error {
	str, ok := v.(string)
	if !ok {
		return nil
	}

	// min: hh:mm:ssZ
	if len(str) < 9 {
		return LocalizableError("less than 9 characters long")
	}
	if str[2] != ':' || str[5] != ':' {
		return LocalizableError("missing colon in correct place")
	}

	// parse hh:mm:ss
	var hms []int
	for _, tok := range strings.SplitN(str[:8], ":", 3) {
		i, err := strconv.Atoi(tok)
		if err != nil {
			return LocalizableError("invalid hour/min/sec")
		}
		if i < 0 {
			return LocalizableError("non-positive hour/min/sec")
		}
		hms = append(hms, i)
	}
	if len(hms) != 3 {
		return LocalizableError("missing hour/min/sec")
	}
	h, m, s := hms[0], hms[1], hms[2]
	// s may be 60 here: a candidate leap second, checked at the end
	if h > 23 || m > 59 || s > 60 {
		return LocalizableError("hour/min/sec out of range")
	}
	str = str[8:]

	// parse sec-frac if present
	if rem, ok := strings.CutPrefix(str, "."); ok {
		numDigits := 0
		for _, ch := range rem {
			if ch >= '0' && ch <= '9' {
				numDigits++
			} else {
				break
			}
		}
		if numDigits == 0 {
			return LocalizableError("no digits in second fraction")
		}
		str = rem[numDigits:]
	}

	if str != "z" && str != "Z" {
		// parse time-numoffset ("+hh:mm" / "-hh:mm")
		if len(str) != 6 {
			return LocalizableError("offset must be 6 characters long")
		}
		// sign is inverted because the offset is *subtracted* below
		// to normalize the wall time to UTC
		var sign int
		switch str[0] {
		case '+':
			sign = -1
		case '-':
			sign = +1
		default:
			return LocalizableError("offset must begin with plus/minus")
		}
		str = str[1:]

		if str[2] != ':' {
			return LocalizableError("missing colon in offset in correct place")
		}

		var zhm []int
		for _, tok := range strings.SplitN(str, ":", 2) {
			i, err := strconv.Atoi(tok)
			if err != nil {
				return LocalizableError("invalid hour/min in offset")
			}
			if i < 0 {
				return LocalizableError("non-positive hour/min in offset")
			}
			zhm = append(zhm, i)
		}
		zh, zm := zhm[0], zhm[1]
		if zh > 23 || zm > 59 {
			return LocalizableError("hour/min in offset out of range")
		}

		// apply timezone: convert h:m to minutes-since-midnight in UTC
		hm := (h*60 + m) + sign*(zh*60+zm)
		if hm < 0 {
			hm += 24 * 60
		}
		h, m = hm/60, hm%60
	}

	// check leap second: only valid at 23:59:60 UTC
	if s >= 60 && (h != 23 || m != 59) {
		return LocalizableError("invalid leap second")
	}

	return nil
}
// see https://datatracker.ietf.org/doc/html/rfc3339#section-5.6
// validateDateTime checks that the value, when a string, is an RFC 3339
// date-time: a full-date, a 't'/'T' separator at index 10, and a
// full-time, each delegated to validateDate/validateTime.
// Non-string values pass trivially.
func validateDateTime(v any) error {
	s, ok := v.(string)
	if !ok {
		return nil
	}

	// min: yyyy-mm-ddThh:mm:ssZ
	if len(s) < 20 {
		return LocalizableError("less than 20 characters long")
	}

	if s[10] != 't' && s[10] != 'T' {
		return LocalizableError("11th character must be t or T")
	}
	if err := validateDate(s[:10]); err != nil {
		return LocalizableError("invalid date element: %v", err)
	}
	if err := validateTime(s[11:]); err != nil {
		return LocalizableError("invalid time element: %v", err)
	}
	return nil
}
// parseURL parses s with net/url and additionally validates any IPv6
// host, which net/url does not check: the address must be enclosed in
// brackets and parse as valid IPv6.
func parseURL(s string) (*gourl.URL, error) {
	u, err := gourl.Parse(s)
	if err != nil {
		return nil, err
	}

	// gourl does not validate ipv6 host address
	hostName := u.Hostname()
	if strings.Contains(hostName, ":") {
		// a colon in the hostname means it can only be an ipv6 literal
		if !strings.Contains(u.Host, "[") || !strings.Contains(u.Host, "]") {
			return nil, LocalizableError("ipv6 address not enclosed in brackets")
		}
		if err := validateIPV6(hostName); err != nil {
			return nil, LocalizableError("invalid ipv6 address: %v", err)
		}
	}
	return u, nil
}
// validateURI checks that v, when a string, is an absolute URI.
func validateURI(v any) error {
	str, isStr := v.(string)
	if !isStr {
		return nil
	}
	parsed, err := parseURL(str)
	switch {
	case err != nil:
		return err
	case !parsed.IsAbs():
		return LocalizableError("relative url")
	default:
		return nil
	}
}
// validateURIReference checks that v, when a string, is a URI reference
// (absolute or relative). Backslashes are rejected outright.
func validateURIReference(v any) error {
	str, isStr := v.(string)
	if !isStr {
		return nil
	}
	if strings.Contains(str, `\`) {
		return LocalizableError(`contains \`)
	}
	if _, err := parseURL(str); err != nil {
		return err
	}
	return nil
}
// validateURITemplate checks that v, when a string, parses as a URL and
// that each percent-decoded path segment has balanced, non-nested
// {...} expressions.
func validateURITemplate(v any) error {
	str, isStr := v.(string)
	if !isStr {
		return nil
	}
	parsed, err := parseURL(str)
	if err != nil {
		return err
	}
	for _, segment := range strings.Split(parsed.RawPath, "/") {
		segment, err = decode(segment)
		if err != nil {
			return LocalizableError("percent decode failed: %v", err)
		}
		inExpr := false // currently inside a {...} expression?
		for _, r := range segment {
			switch r {
			case '{':
				if inExpr {
					// '{' while an expression is already open
					return LocalizableError("nested curly braces")
				}
				inExpr = true
			case '}':
				if !inExpr {
					// '}' with no matching open brace
					return LocalizableError("nested curly braces")
				}
				inExpr = false
			}
		}
		if inExpr {
			return LocalizableError("no matching closing brace")
		}
	}
	return nil
}
// validatePeriod checks that v, when a string, is an ISO 8601-style
// period: "start/end" where each side is a date-time, and at most one
// side may instead be a duration (prefixed with "P").
func validatePeriod(v any) error {
	str, isStr := v.(string)
	if !isStr {
		return nil
	}
	start, end, found := strings.Cut(str, "/")
	if !found {
		return LocalizableError("missing slash")
	}
	isDuration := func(s string) bool { return strings.HasPrefix(s, "P") }
	if isDuration(start) {
		// duration/date-time
		if err := validateDuration(start); err != nil {
			return LocalizableError("invalid start duration: %v", err)
		}
		if err := validateDateTime(end); err != nil {
			return LocalizableError("invalid end date-time: %v", err)
		}
		return nil
	}
	// date-time/duration or date-time/date-time
	if err := validateDateTime(start); err != nil {
		return LocalizableError("invalid start date-time: %v", err)
	}
	if isDuration(end) {
		if err := validateDuration(end); err != nil {
			return LocalizableError("invalid end duration: %v", err)
		}
		return nil
	}
	if err := validateDateTime(end); err != nil {
		return LocalizableError("invalid end date-time: %v", err)
	}
	return nil
}
// see https://semver.org/#backusnaur-form-grammar-for-valid-semver-versions
//
// validateSemver checks that v, when a string, is a valid semantic
// version: MAJOR.MINOR.PATCH with optional -prerelease and +build parts.
func validateSemver(v any) error {
	str, isStr := v.(string)
	if !isStr {
		return nil
	}

	isDigit := func(r rune) bool { return r >= '0' && r <= '9' }
	isAlphaOrHyphen := func(r rune) bool {
		return (r >= 'a' && r <= 'z') || (r >= 'A' && r <= 'Z') || r == '-'
	}

	// build -- everything after the first '+'
	if plus := strings.IndexByte(str, '+'); plus != -1 {
		build := str[plus+1:]
		if build == "" {
			return LocalizableError("build is empty")
		}
		for _, id := range strings.Split(build, ".") {
			if id == "" {
				return LocalizableError("build identifier is empty")
			}
			for _, r := range id {
				if !isDigit(r) && !isAlphaOrHyphen(r) {
					return LocalizableError("invalid character %q in build identifier", r)
				}
			}
		}
		str = str[:plus]
	}

	// pre-release -- everything after the first '-' (build already stripped)
	if hyphen := strings.IndexByte(str, '-'); hyphen != -1 {
		for _, id := range strings.Split(str[hyphen+1:], ".") {
			if id == "" {
				return LocalizableError("pre-release identifier is empty")
			}
			numeric := true // identifier consists solely of digits?
			for _, r := range id {
				switch {
				case isDigit(r):
				case isAlphaOrHyphen(r):
					numeric = false
				default:
					return LocalizableError("invalid character %q in pre-release identifier", r)
				}
			}
			// numeric identifiers must not have leading zeros
			if numeric && len(id) > 1 && id[0] == '0' {
				return LocalizableError("pre-release numeric identifier starts with zero")
			}
		}
		str = str[:hyphen]
	}

	// versionCore -- major.minor.patch
	parts := strings.Split(str, ".")
	if len(parts) != 3 {
		return LocalizableError("versionCore must have 3 numbers separated by dot")
	}
	for i, name := range []string{"major", "minor", "patch"} {
		num := parts[i]
		switch {
		case num == "":
			return LocalizableError("%s is empty", name)
		case len(num) > 1 && num[0] == '0':
			return LocalizableError("%s starts with zero", name)
		}
		for _, r := range num {
			if !isDigit(r) {
				return LocalizableError("%s contains non-digit", name)
			}
		}
	}
	return nil
}

View file

@ -1,8 +0,0 @@
go 1.21.1
use (
.
./cmd/jv
)
replace github.com/santhosh-tekuri/jsonschema/v6 v6.0.0 => ./

View file

@ -1,651 +0,0 @@
package kind
import (
"fmt"
"math/big"
"strings"
"golang.org/x/text/message"
)
// --

// InvalidJsonValue reports a Go value whose type cannot occur in a
// decoded json document.
type InvalidJsonValue struct {
Value any
}

// KeywordPath returns nil: this kind is not tied to a schema keyword.
func (*InvalidJsonValue) KeywordPath() []string {
return nil
}

func (k *InvalidJsonValue) LocalizedString(p *message.Printer) string {
return p.Sprintf("invalid jsonType %T", k.Value)
}

// --

// Schema is the top-level error kind: validation against the schema at
// Location failed.
type Schema struct {
Location string
}

func (*Schema) KeywordPath() []string {
return nil
}

func (k *Schema) LocalizedString(p *message.Printer) string {
return p.Sprintf("jsonschema validation failed with %s", quote(k.Location))
}

// --

// Group is a generic error kind grouping child failures.
type Group struct{}

func (*Group) KeywordPath() []string {
return nil
}

func (*Group) LocalizedString(p *message.Printer) string {
return p.Sprintf("validation failed")
}

// --

// Not is the error kind for a failed "not" keyword.
type Not struct{}

func (*Not) KeywordPath() []string {
return nil
}

func (*Not) LocalizedString(p *message.Printer) string {
return p.Sprintf("not failed")
}

// --

// AllOf is the error kind for a failed "allOf" keyword.
type AllOf struct{}

func (*AllOf) KeywordPath() []string {
return []string{"allOf"}
}

func (*AllOf) LocalizedString(p *message.Printer) string {
return p.Sprintf("allOf failed")
}

// --

// AnyOf is the error kind for a failed "anyOf" keyword.
type AnyOf struct{}

func (*AnyOf) KeywordPath() []string {
return []string{"anyOf"}
}

func (*AnyOf) LocalizedString(p *message.Printer) string {
return p.Sprintf("anyOf failed")
}

// --
// OneOf is the error kind for a failed "oneOf" keyword.
type OneOf struct {
	// Subschemas gives indexes of Subschemas that have matched.
	// Value nil, means none of the subschemas matched.
	Subschemas []int
}

func (*OneOf) KeywordPath() []string {
	return []string{"oneOf"}
}

func (k *OneOf) LocalizedString(p *message.Printer) string {
	if len(k.Subschemas) == 0 {
		return p.Sprintf("oneOf failed, none matched")
	}
	// Defensive guard: the message below reports exactly two matching
	// subschema indexes; a slice with only one entry previously caused an
	// index-out-of-range panic.
	if len(k.Subschemas) < 2 {
		return p.Sprintf("oneOf failed")
	}
	return p.Sprintf("oneOf failed, subschemas %d, %d matched", k.Subschemas[0], k.Subschemas[1])
}
// --

// FalseSchema is the error kind for the literal `false` schema, which
// rejects every instance.
type FalseSchema struct{}

func (*FalseSchema) KeywordPath() []string {
return nil
}

func (*FalseSchema) LocalizedString(p *message.Printer) string {
return p.Sprintf("false schema")
}

// --

// RefCycle reports two keyword locations that resolve to the same URL,
// forming a reference cycle.
type RefCycle struct {
URL string
KeywordLocation1 string
KeywordLocation2 string
}

func (*RefCycle) KeywordPath() []string {
return nil
}

func (k *RefCycle) LocalizedString(p *message.Printer) string {
return p.Sprintf("both %s and %s resolve to %q causing reference cycle", k.KeywordLocation1, k.KeywordLocation2, k.URL)
}

// --

// Type is the error kind for a failed "type" keyword.
type Type struct {
Got string
Want []string
}

func (*Type) KeywordPath() []string {
return []string{"type"}
}

func (k *Type) LocalizedString(p *message.Printer) string {
// "type" may allow several alternatives; list them all
want := strings.Join(k.Want, " or ")
return p.Sprintf("got %s, want %s", k.Got, want)
}

// --

// Enum is the error kind for a failed "enum" keyword.
type Enum struct {
Got any
Want []any
}

// KeywordPath implements jsonschema.ErrorKind.
func (*Enum) KeywordPath() []string {
return []string{"enum"}
}

func (k *Enum) LocalizedString(p *message.Printer) string {
// Spell out the allowed values only when all of them are primitive;
// arrays/objects would make the message unreadable.
allPrimitive := true
loop:
for _, item := range k.Want {
switch item.(type) {
case []any, map[string]any:
allPrimitive = false
break loop
}
}
if allPrimitive {
if len(k.Want) == 1 {
return p.Sprintf("value must be %s", display(k.Want[0]))
}
var want []string
for _, v := range k.Want {
want = append(want, display(v))
}
return p.Sprintf("value must be one of %s", strings.Join(want, ", "))
}
return p.Sprintf("enum failed")
}

// --

// Const is the error kind for a failed "const" keyword.
type Const struct {
Got any
Want any
}

func (*Const) KeywordPath() []string {
return []string{"const"}
}

func (k *Const) LocalizedString(p *message.Printer) string {
switch want := k.Want.(type) {
case []any, map[string]any:
// composite constants are not rendered inline
return p.Sprintf("const failed")
default:
return p.Sprintf("value must be %s", display(want))
}
}

// --

// Format is the error kind for a failed "format" keyword; Err carries
// the underlying format-validator error.
type Format struct {
Got any
Want string
Err error
}

func (*Format) KeywordPath() []string {
return []string{"format"}
}

func (k *Format) LocalizedString(p *message.Printer) string {
return p.Sprintf("%s is not valid %s: %v", display(k.Got), k.Want, localizedError(k.Err, p))
}

// --

// Reference is the error kind for a failed reference keyword; Keyword
// holds the actual keyword name (used as the keyword path).
type Reference struct {
Keyword string
URL string
}

func (k *Reference) KeywordPath() []string {
return []string{k.Keyword}
}

func (*Reference) LocalizedString(p *message.Printer) string {
return p.Sprintf("validation failed")
}
// --

// MinProperties is the error kind for a failed "minProperties" keyword.
type MinProperties struct {
Got, Want int
}

func (*MinProperties) KeywordPath() []string {
return []string{"minProperties"}
}

func (k *MinProperties) LocalizedString(p *message.Printer) string {
return p.Sprintf("minProperties: got %d, want %d", k.Got, k.Want)
}

// --

// MaxProperties is the error kind for a failed "maxProperties" keyword.
type MaxProperties struct {
Got, Want int
}

func (*MaxProperties) KeywordPath() []string {
return []string{"maxProperties"}
}

func (k *MaxProperties) LocalizedString(p *message.Printer) string {
return p.Sprintf("maxProperties: got %d, want %d", k.Got, k.Want)
}

// --

// MinItems is the error kind for a failed "minItems" keyword.
type MinItems struct {
Got, Want int
}

func (*MinItems) KeywordPath() []string {
return []string{"minItems"}
}

func (k *MinItems) LocalizedString(p *message.Printer) string {
return p.Sprintf("minItems: got %d, want %d", k.Got, k.Want)
}

// --

// MaxItems is the error kind for a failed "maxItems" keyword.
type MaxItems struct {
Got, Want int
}

func (*MaxItems) KeywordPath() []string {
return []string{"maxItems"}
}

func (k *MaxItems) LocalizedString(p *message.Printer) string {
return p.Sprintf("maxItems: got %d, want %d", k.Got, k.Want)
}

// --

// AdditionalItems is the error kind for a failed "additionalItems"
// keyword; Count is the number of offending trailing items.
type AdditionalItems struct {
Count int
}

func (*AdditionalItems) KeywordPath() []string {
return []string{"additionalItems"}
}

func (k *AdditionalItems) LocalizedString(p *message.Printer) string {
return p.Sprintf("last %d additionalItem(s) not allowed", k.Count)
}

// --

// Required is the error kind for a failed "required" keyword.
type Required struct {
Missing []string
}

func (*Required) KeywordPath() []string {
return []string{"required"}
}

func (k *Required) LocalizedString(p *message.Printer) string {
// singular/plural message variants keep the output natural
if len(k.Missing) == 1 {
return p.Sprintf("missing property %s", quote(k.Missing[0]))
}
return p.Sprintf("missing properties %s", joinQuoted(k.Missing, ", "))
}

// --

// Dependency is the error kind for a failed "dependencies" keyword.
type Dependency struct {
Prop string // dependency of prop that failed
Missing []string // missing props
}

func (k *Dependency) KeywordPath() []string {
return []string{"dependency", k.Prop}
}

func (k *Dependency) LocalizedString(p *message.Printer) string {
return p.Sprintf("properties %s required, if %s exists", joinQuoted(k.Missing, ", "), quote(k.Prop))
}

// --

// DependentRequired is the error kind for a failed "dependentRequired"
// keyword.
type DependentRequired struct {
Prop string // dependency of prop that failed
Missing []string // missing props
}

func (k *DependentRequired) KeywordPath() []string {
return []string{"dependentRequired", k.Prop}
}

func (k *DependentRequired) LocalizedString(p *message.Printer) string {
return p.Sprintf("properties %s required, if %s exists", joinQuoted(k.Missing, ", "), quote(k.Prop))
}
// --

// AdditionalProperties is the error kind for a failed
// "additionalProperties" keyword.
type AdditionalProperties struct {
Properties []string
}

func (*AdditionalProperties) KeywordPath() []string {
return []string{"additionalProperties"}
}

func (k *AdditionalProperties) LocalizedString(p *message.Printer) string {
return p.Sprintf("additional properties %s not allowed", joinQuoted(k.Properties, ", "))
}

// --

// PropertyNames is the error kind for a failed "propertyNames" keyword.
type PropertyNames struct {
Property string
}

func (*PropertyNames) KeywordPath() []string {
return []string{"propertyNames"}
}

func (k *PropertyNames) LocalizedString(p *message.Printer) string {
return p.Sprintf("invalid propertyName %s", quote(k.Property))
}

// --

// UniqueItems is the error kind for a failed "uniqueItems" keyword;
// Duplicates holds the indexes of the two equal items.
type UniqueItems struct {
Duplicates [2]int
}

func (*UniqueItems) KeywordPath() []string {
return []string{"uniqueItems"}
}

func (k *UniqueItems) LocalizedString(p *message.Printer) string {
return p.Sprintf("items at %d and %d are equal", k.Duplicates[0], k.Duplicates[1])
}

// --

// Contains is the error kind for a failed "contains" keyword.
type Contains struct{}

func (*Contains) KeywordPath() []string {
return []string{"contains"}
}

func (*Contains) LocalizedString(p *message.Printer) string {
return p.Sprintf("no items match contains schema")
}

// --

// MinContains is the error kind for a failed "minContains" keyword;
// Got holds the indexes of the items that matched.
type MinContains struct {
Got []int
Want int
}

func (*MinContains) KeywordPath() []string {
return []string{"minContains"}
}

func (k *MinContains) LocalizedString(p *message.Printer) string {
if len(k.Got) == 0 {
return p.Sprintf("min %d items required to match contains schema, but none matched", k.Want)
} else {
// got[1:len(got)-1] strips the surrounding brackets from the
// slice's "%v" rendering, e.g. "[1 2]" -> "1 2".
got := fmt.Sprintf("%v", k.Got)
return p.Sprintf("min %d items required to match contains schema, but matched %d items at %v", k.Want, len(k.Got), got[1:len(got)-1])
}
}

// --

// MaxContains is the error kind for a failed "maxContains" keyword;
// Got holds the indexes of the items that matched.
type MaxContains struct {
Got []int
Want int
}

func (*MaxContains) KeywordPath() []string {
return []string{"maxContains"}
}

func (k *MaxContains) LocalizedString(p *message.Printer) string {
// strip the surrounding brackets from the slice's "%v" rendering
got := fmt.Sprintf("%v", k.Got)
return p.Sprintf("max %d items required to match contains schema, but matched %d items at %v", k.Want, len(k.Got), got[1:len(got)-1])
}
// --

// MinLength is the error kind for a failed "minLength" keyword.
type MinLength struct {
Got, Want int
}

func (*MinLength) KeywordPath() []string {
return []string{"minLength"}
}

func (k *MinLength) LocalizedString(p *message.Printer) string {
return p.Sprintf("minLength: got %d, want %d", k.Got, k.Want)
}

// --

// MaxLength is the error kind for a failed "maxLength" keyword.
type MaxLength struct {
Got, Want int
}

func (*MaxLength) KeywordPath() []string {
return []string{"maxLength"}
}

func (k *MaxLength) LocalizedString(p *message.Printer) string {
return p.Sprintf("maxLength: got %d, want %d", k.Got, k.Want)
}

// --

// Pattern is the error kind for a failed "pattern" keyword.
type Pattern struct {
Got string
Want string
}

func (*Pattern) KeywordPath() []string {
return []string{"pattern"}
}

func (k *Pattern) LocalizedString(p *message.Printer) string {
return p.Sprintf("%s does not match pattern %s", quote(k.Got), quote(k.Want))
}

// --

// ContentEncoding is the error kind for a failed "contentEncoding"
// keyword; Err carries the underlying decode error.
type ContentEncoding struct {
Want string
Err error
}

func (*ContentEncoding) KeywordPath() []string {
return []string{"contentEncoding"}
}

func (k *ContentEncoding) LocalizedString(p *message.Printer) string {
return p.Sprintf("value is not %s encoded: %v", quote(k.Want), localizedError(k.Err, p))
}
// --

// ContentMediaType is the error kind for a failed "contentMediaType"
// keyword; Err carries the underlying media-type error.
type ContentMediaType struct {
	Got  []byte
	Want string
	Err  error
}

func (*ContentMediaType) KeywordPath() []string {
	return []string{"contentMediaType"}
}

func (k *ContentMediaType) LocalizedString(p *message.Printer) string {
	// Fixed message typo ("value if not") and localize the wrapped error,
	// consistent with ContentEncoding.LocalizedString.
	return p.Sprintf("value is not of mediatype %s: %v", quote(k.Want), localizedError(k.Err, p))
}
// --

// ContentSchema is the error kind for a failed "contentSchema" keyword.
type ContentSchema struct{}

func (*ContentSchema) KeywordPath() []string {
return []string{"contentSchema"}
}

func (*ContentSchema) LocalizedString(p *message.Printer) string {
return p.Sprintf("contentSchema failed")
}

// --

// Minimum is the error kind for a failed "minimum" keyword.
// Bounds are exact rationals; they are converted to float64 for display only.
type Minimum struct {
Got *big.Rat
Want *big.Rat
}

func (*Minimum) KeywordPath() []string {
return []string{"minimum"}
}

func (k *Minimum) LocalizedString(p *message.Printer) string {
got, _ := k.Got.Float64()
want, _ := k.Want.Float64()
return p.Sprintf("minimum: got %v, want %v", got, want)
}

// --

// Maximum is the error kind for a failed "maximum" keyword.
type Maximum struct {
Got *big.Rat
Want *big.Rat
}

func (*Maximum) KeywordPath() []string {
return []string{"maximum"}
}

func (k *Maximum) LocalizedString(p *message.Printer) string {
got, _ := k.Got.Float64()
want, _ := k.Want.Float64()
return p.Sprintf("maximum: got %v, want %v", got, want)
}

// --

// ExclusiveMinimum is the error kind for a failed "exclusiveMinimum"
// keyword.
type ExclusiveMinimum struct {
Got *big.Rat
Want *big.Rat
}

func (*ExclusiveMinimum) KeywordPath() []string {
return []string{"exclusiveMinimum"}
}

func (k *ExclusiveMinimum) LocalizedString(p *message.Printer) string {
got, _ := k.Got.Float64()
want, _ := k.Want.Float64()
return p.Sprintf("exclusiveMinimum: got %v, want %v", got, want)
}

// --

// ExclusiveMaximum is the error kind for a failed "exclusiveMaximum"
// keyword.
type ExclusiveMaximum struct {
Got *big.Rat
Want *big.Rat
}

func (*ExclusiveMaximum) KeywordPath() []string {
return []string{"exclusiveMaximum"}
}

func (k *ExclusiveMaximum) LocalizedString(p *message.Printer) string {
got, _ := k.Got.Float64()
want, _ := k.Want.Float64()
return p.Sprintf("exclusiveMaximum: got %v, want %v", got, want)
}

// --

// MultipleOf is the error kind for a failed "multipleOf" keyword.
type MultipleOf struct {
Got *big.Rat
Want *big.Rat
}

func (*MultipleOf) KeywordPath() []string {
return []string{"multipleOf"}
}

func (k *MultipleOf) LocalizedString(p *message.Printer) string {
got, _ := k.Got.Float64()
want, _ := k.Want.Float64()
return p.Sprintf("multipleOf: got %v, want %v", got, want)
}
// --

// quote returns s wrapped in single quotes, with embedded single quotes
// escaped as \' (e.g. quote("it's") == `'it\'s'`).
func quote(s string) string {
	// %q double-quotes and escapes s; undo the double-quote escaping,
	// escape single quotes, then swap the surrounding quote characters.
	// NOTE: the replacement order (before stripping the outer quotes)
	// is deliberate and must be preserved.
	quoted := fmt.Sprintf("%q", s)
	quoted = strings.ReplaceAll(quoted, `\"`, `"`)
	quoted = strings.ReplaceAll(quoted, `'`, `\'`)
	return "'" + quoted[1:len(quoted)-1] + "'"
}
// joinQuoted joins the elements of arr with sep, quoting each element
// with quote.
func joinQuoted(arr []string, sep string) string {
	var b strings.Builder
	for i, s := range arr {
		if i > 0 {
			b.WriteString(sep)
		}
		b.WriteString(quote(s))
	}
	return b.String()
}
// display renders a value for use in an error message; strings are
// quoted, everything else uses its default formatting.
// to be used only for primitive.
func display(v any) string {
	switch val := v.(type) {
	case string:
		return quote(val)
	case []any, map[string]any:
		// not primitive; fall back to a generic word
		return "value"
	default:
		return fmt.Sprintf("%v", val)
	}
}
// localizedError renders err via its LocalizedError method when it
// implements that optional interface, falling back to Error() otherwise.
func localizedError(err error, p *message.Printer) string {
if err, ok := err.(interface{ LocalizedError(*message.Printer) string }); ok {
return err.LocalizedError(p)
}
return err.Error()
}

View file

@ -1,266 +0,0 @@
package jsonschema
import (
"embed"
"encoding/json"
"errors"
"fmt"
"io"
"io/fs"
gourl "net/url"
"os"
"path/filepath"
"runtime"
"strings"
)
// URLLoader knows how to load json from given url.
// Implementations return the decoded json document as `any`
// (see UnmarshalJSON for precision-preserving decoding).
type URLLoader interface {
// Load loads json from given absolute url.
Load(url string) (any, error)
}
// --

// FileLoader loads json file url.
type FileLoader struct{}

// Load opens the file named by the file:// url and decodes its json content.
func (l FileLoader) Load(url string) (any, error) {
	path, err := l.ToFile(url)
	if err != nil {
		return nil, err
	}
	file, err := os.Open(path)
	if err != nil {
		return nil, err
	}
	defer file.Close()
	return UnmarshalJSON(file)
}
// ToFile is helper method to convert file url to file path.
func (l FileLoader) ToFile(url string) (string, error) {
	parsed, err := gourl.Parse(url)
	if err != nil {
		return "", err
	}
	if parsed.Scheme != "file" {
		return "", fmt.Errorf("invalid file url: %s", parsed)
	}
	path := parsed.Path
	if runtime.GOOS == "windows" {
		// "/C:/dir/file" -> `C:\dir\file`
		path = strings.TrimPrefix(path, "/")
		path = filepath.FromSlash(path)
	}
	return path, nil
}
// --

// SchemeURLLoader delegates to other [URLLoaders]
// based on url scheme.
type SchemeURLLoader map[string]URLLoader

// Load dispatches to the URLLoader registered for the url's scheme.
func (l SchemeURLLoader) Load(url string) (any, error) {
	parsed, err := gourl.Parse(url)
	if err != nil {
		return nil, err
	}
	loader, registered := l[parsed.Scheme]
	if !registered {
		return nil, &UnsupportedURLSchemeError{parsed.String()}
	}
	return loader.Load(url)
}
// --

// metaFS embeds the bundled meta-schemas so the well-known
// json-schema.org urls resolve without network access.
//
//go:embed metaschemas
var metaFS embed.FS

// openMeta returns the embedded meta-schema file for url, or (nil, nil)
// when url is not a json-schema.org meta-schema location.
func openMeta(url string) (fs.File, error) {
u, meta := strings.CutPrefix(url, "http://json-schema.org/")
if !meta {
u, meta = strings.CutPrefix(url, "https://json-schema.org/")
}
if meta {
// bare "schema" redirects to the latest supported draft
if u == "schema" {
return openMeta(draftLatest.url)
}
f, err := metaFS.Open("metaschemas/" + u)
if err != nil {
// unknown path under json-schema.org: treat as "not a meta-schema"
if errors.Is(err, fs.ErrNotExist) {
return nil, nil
}
return nil, err
}
return f, err
}
return nil, nil
}
// isMeta reports whether url refers to one of the embedded meta-schemas.
func isMeta(url string) bool {
	file, err := openMeta(url)
	if err != nil {
		// an open error still means the url matched a meta-schema location
		return true
	}
	if file == nil {
		return false
	}
	file.Close()
	return true
}
// loadMeta decodes the embedded meta-schema for url; it returns
// (nil, nil) when url is not a meta-schema location.
func loadMeta(url string) (any, error) {
	file, err := openMeta(url)
	if err != nil {
		return nil, err
	}
	if file == nil {
		return nil, nil
	}
	defer file.Close()
	return UnmarshalJSON(file)
}
// --

// defaultLoader caches loaded documents and delegates fetching of
// non-embedded urls to a user-supplied URLLoader.
type defaultLoader struct {
docs map[url]any // docs loaded so far
loader URLLoader // may be nil; load then fails with LoadURLError
}
// add records doc for url; it reports false when the url was already cached.
func (l *defaultLoader) add(url url, doc any) bool {
	if _, seen := l.docs[url]; seen {
		return false
	}
	l.docs[url] = doc
	return true
}
// load returns the document at url, consulting (in order) the cache,
// the embedded meta-schemas, and finally the configured URLLoader.
func (l *defaultLoader) load(url url) (any, error) {
	if doc, cached := l.docs[url]; cached {
		return doc, nil
	}
	// embedded meta-schemas never go through the URLLoader
	if doc, err := loadMeta(url.String()); err != nil {
		return nil, err
	} else if doc != nil {
		l.add(url, doc)
		return doc, nil
	}
	if l.loader == nil {
		return nil, &LoadURLError{url.String(), errors.New("no URLLoader set")}
	}
	doc, err := l.loader.Load(url.String())
	if err != nil {
		return nil, &LoadURLError{URL: url.String(), Err: err}
	}
	l.add(url, doc)
	return doc, nil
}
// getDraft resolves the draft for the document at up by following its
// "$schema" chain, returning defaultDraft when no "$schema" is declared.
// cycle tracks visited meta-schema urls to detect reference cycles.
func (l *defaultLoader) getDraft(up urlPtr, doc any, defaultDraft *Draft, cycle map[url]struct{}) (*Draft, error) {
obj, ok := doc.(map[string]any)
if !ok {
// non-object schemas (e.g. boolean) carry no "$schema"
return defaultDraft, nil
}
sch, ok := strVal(obj, "$schema")
if !ok {
return defaultDraft, nil
}
// fast path: one of the well-known draft urls
if draft := draftFromURL(sch); draft != nil {
return draft, nil
}
// presumably split drops the fragment from the url -- confirm against split
sch, _ = split(sch)
if _, err := gourl.Parse(sch); err != nil {
return nil, &InvalidMetaSchemaURLError{up.String(), err}
}
schUrl := url(sch)
// a root schema declaring itself as its own meta-schema is not a
// recognized draft
if up.ptr.isEmpty() && schUrl == up.url {
return nil, &UnsupportedDraftError{schUrl.String()}
}
if _, ok := cycle[schUrl]; ok {
return nil, &MetaSchemaCycleError{schUrl.String()}
}
cycle[schUrl] = struct{}{}
doc, err := l.load(schUrl)
if err != nil {
return nil, err
}
// recurse into the meta-schema to determine the draft it is written in
return l.getDraft(urlPtr{schUrl, ""}, doc, defaultDraft, cycle)
}
// getMetaVocabs returns the vocabularies enabled by the document's
// custom "$schema" meta-schema; nil when the document declares no
// "$schema" or uses a well-known draft.
func (l *defaultLoader) getMetaVocabs(doc any, draft *Draft, vocabularies map[string]*Vocabulary) ([]string, error) {
obj, ok := doc.(map[string]any)
if !ok {
return nil, nil
}
sch, ok := strVal(obj, "$schema")
if !ok {
return nil, nil
}
// NOTE: this `draft` deliberately shadows the parameter inside the if;
// well-known drafts contribute no custom vocabularies.
if draft := draftFromURL(sch); draft != nil {
return nil, nil
}
sch, _ = split(sch)
if _, err := gourl.Parse(sch); err != nil {
return nil, &ParseURLError{sch, err}
}
schUrl := url(sch)
doc, err := l.load(schUrl)
if err != nil {
return nil, err
}
return draft.getVocabs(schUrl, doc, vocabularies)
}
// --
type LoadURLError struct {
URL string
Err error
}
func (e *LoadURLError) Error() string {
return fmt.Sprintf("failing loading %q: %v", e.URL, e.Err)
}
// --

// UnsupportedURLSchemeError is returned by SchemeURLLoader when no
// URLLoader is registered for the url's scheme.
type UnsupportedURLSchemeError struct {
url string
}

func (e *UnsupportedURLSchemeError) Error() string {
return fmt.Sprintf("no URLLoader registered for %q", e.url)
}

// --

// ResourceExistsError reports an attempt to register a resource for a
// url that already has one.
type ResourceExistsError struct {
url string
}

func (e *ResourceExistsError) Error() string {
return fmt.Sprintf("resource for %q already exists", e.url)
}
// --
// UnmarshalJSON unmarshals into [any] without losing
// number precision using [json.Number].
func UnmarshalJSON(r io.Reader) (any, error) {
decoder := json.NewDecoder(r)
decoder.UseNumber()
var doc any
if err := decoder.Decode(&doc); err != nil {
return nil, err
}
if _, err := decoder.Token(); err == nil || err != io.EOF {
return nil, fmt.Errorf("invalid character after top-level value")
}
return doc, nil
}

View file

@ -1,151 +0,0 @@
{
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Core schema meta-schema",
"definitions": {
"schemaArray": {
"type": "array",
"minItems": 1,
"items": { "$ref": "#" }
},
"positiveInteger": {
"type": "integer",
"minimum": 0
},
"positiveIntegerDefault0": {
"allOf": [ { "$ref": "#/definitions/positiveInteger" }, { "default": 0 } ]
},
"simpleTypes": {
"enum": [ "array", "boolean", "integer", "null", "number", "object", "string" ]
},
"stringArray": {
"type": "array",
"items": { "type": "string" },
"minItems": 1,
"uniqueItems": true
}
},
"type": "object",
"properties": {
"id": {
"type": "string",
"format": "uriref"
},
"$schema": {
"type": "string",
"format": "uri"
},
"title": {
"type": "string"
},
"description": {
"type": "string"
},
"default": {},
"multipleOf": {
"type": "number",
"minimum": 0,
"exclusiveMinimum": true
},
"maximum": {
"type": "number"
},
"exclusiveMaximum": {
"type": "boolean",
"default": false
},
"minimum": {
"type": "number"
},
"exclusiveMinimum": {
"type": "boolean",
"default": false
},
"maxLength": { "$ref": "#/definitions/positiveInteger" },
"minLength": { "$ref": "#/definitions/positiveIntegerDefault0" },
"pattern": {
"type": "string",
"format": "regex"
},
"additionalItems": {
"anyOf": [
{ "type": "boolean" },
{ "$ref": "#" }
],
"default": {}
},
"items": {
"anyOf": [
{ "$ref": "#" },
{ "$ref": "#/definitions/schemaArray" }
],
"default": {}
},
"maxItems": { "$ref": "#/definitions/positiveInteger" },
"minItems": { "$ref": "#/definitions/positiveIntegerDefault0" },
"uniqueItems": {
"type": "boolean",
"default": false
},
"maxProperties": { "$ref": "#/definitions/positiveInteger" },
"minProperties": { "$ref": "#/definitions/positiveIntegerDefault0" },
"required": { "$ref": "#/definitions/stringArray" },
"additionalProperties": {
"anyOf": [
{ "type": "boolean" },
{ "$ref": "#" }
],
"default": {}
},
"definitions": {
"type": "object",
"additionalProperties": { "$ref": "#" },
"default": {}
},
"properties": {
"type": "object",
"additionalProperties": { "$ref": "#" },
"default": {}
},
"patternProperties": {
"type": "object",
"additionalProperties": { "$ref": "#" },
"default": {}
},
"dependencies": {
"type": "object",
"additionalProperties": {
"anyOf": [
{ "$ref": "#" },
{ "$ref": "#/definitions/stringArray" }
]
}
},
"enum": {
"type": "array",
"minItems": 1,
"uniqueItems": true
},
"type": {
"anyOf": [
{ "$ref": "#/definitions/simpleTypes" },
{
"type": "array",
"items": { "$ref": "#/definitions/simpleTypes" },
"minItems": 1,
"uniqueItems": true
}
]
},
"allOf": { "$ref": "#/definitions/schemaArray" },
"anyOf": { "$ref": "#/definitions/schemaArray" },
"oneOf": { "$ref": "#/definitions/schemaArray" },
"not": { "$ref": "#" },
"format": { "type": "string" },
"$ref": { "type": "string" }
},
"dependencies": {
"exclusiveMaximum": [ "maximum" ],
"exclusiveMinimum": [ "minimum" ]
},
"default": {}
}

View file

@ -1,150 +0,0 @@
{
"$schema": "http://json-schema.org/draft-06/schema#",
"$id": "http://json-schema.org/draft-06/schema#",
"title": "Core schema meta-schema",
"definitions": {
"schemaArray": {
"type": "array",
"minItems": 1,
"items": { "$ref": "#" }
},
"nonNegativeInteger": {
"type": "integer",
"minimum": 0
},
"nonNegativeIntegerDefault0": {
"allOf": [
{ "$ref": "#/definitions/nonNegativeInteger" },
{ "default": 0 }
]
},
"simpleTypes": {
"enum": [
"array",
"boolean",
"integer",
"null",
"number",
"object",
"string"
]
},
"stringArray": {
"type": "array",
"items": { "type": "string" },
"uniqueItems": true,
"default": []
}
},
"type": ["object", "boolean"],
"properties": {
"$id": {
"type": "string",
"format": "uri-reference"
},
"$schema": {
"type": "string",
"format": "uri"
},
"$ref": {
"type": "string",
"format": "uri-reference"
},
"title": {
"type": "string"
},
"description": {
"type": "string"
},
"default": {},
"multipleOf": {
"type": "number",
"exclusiveMinimum": 0
},
"maximum": {
"type": "number"
},
"exclusiveMaximum": {
"type": "number"
},
"minimum": {
"type": "number"
},
"exclusiveMinimum": {
"type": "number"
},
"maxLength": { "$ref": "#/definitions/nonNegativeInteger" },
"minLength": { "$ref": "#/definitions/nonNegativeIntegerDefault0" },
"pattern": {
"type": "string",
"format": "regex"
},
"additionalItems": { "$ref": "#" },
"items": {
"anyOf": [
{ "$ref": "#" },
{ "$ref": "#/definitions/schemaArray" }
],
"default": {}
},
"maxItems": { "$ref": "#/definitions/nonNegativeInteger" },
"minItems": { "$ref": "#/definitions/nonNegativeIntegerDefault0" },
"uniqueItems": {
"type": "boolean",
"default": false
},
"contains": { "$ref": "#" },
"maxProperties": { "$ref": "#/definitions/nonNegativeInteger" },
"minProperties": { "$ref": "#/definitions/nonNegativeIntegerDefault0" },
"required": { "$ref": "#/definitions/stringArray" },
"additionalProperties": { "$ref": "#" },
"definitions": {
"type": "object",
"additionalProperties": { "$ref": "#" },
"default": {}
},
"properties": {
"type": "object",
"additionalProperties": { "$ref": "#" },
"default": {}
},
"patternProperties": {
"type": "object",
"additionalProperties": { "$ref": "#" },
"default": {}
},
"dependencies": {
"type": "object",
"additionalProperties": {
"anyOf": [
{ "$ref": "#" },
{ "$ref": "#/definitions/stringArray" }
]
}
},
"propertyNames": { "$ref": "#" },
"const": {},
"enum": {
"type": "array",
"minItems": 1,
"uniqueItems": true
},
"type": {
"anyOf": [
{ "$ref": "#/definitions/simpleTypes" },
{
"type": "array",
"items": { "$ref": "#/definitions/simpleTypes" },
"minItems": 1,
"uniqueItems": true
}
]
},
"format": { "type": "string" },
"allOf": { "$ref": "#/definitions/schemaArray" },
"anyOf": { "$ref": "#/definitions/schemaArray" },
"oneOf": { "$ref": "#/definitions/schemaArray" },
"not": { "$ref": "#" }
},
"default": {}
}

Some files were not shown because too many files have changed in this diff Show more