feat: Add SARIF output support

Mridang Agarwalla 2025-11-04 16:24:45 +07:00
parent e60892483e
commit 94cd001ba0
255 changed files with 60021 additions and 7 deletions


@ -17,10 +17,10 @@ It is inspired by, contains code from and is designed to stay close to
* uses by default a [self-updating fork](https://github.com/yannh/kubernetes-json-schema) of the schemas registry maintained
by the kubernetes-json-schema project - which guarantees
up-to-date **schemas for all recent versions of Kubernetes**.
<details><summary><h4>Speed comparison with Kubeval</h4></summary><p>
Running on a pretty large kubeconfigs setup, on a laptop with 4 cores:
```bash
$ time kubeconform -ignore-missing-schemas -n 8 -summary preview staging production
Summary: 50714 resources found in 35139 files - Valid: 27334, Invalid: 0, Errors: 0 Skipped: 23380
```
@ -122,7 +122,7 @@ Usage: kubeconform [OPTION]... [FILE OR FOLDER]...
-n int
number of goroutines to run concurrently (default 4)
-output string
output format - json, junit, pretty, tap, text (default "text")
output format - json, junit, pretty, tap, sarif, text (default "text")
-reject string
comma-separated list of kinds or GVKs to reject
-schema-location value
@ -234,9 +234,9 @@ Here are the variables you can use in -schema-location:
### CustomResourceDefinition (CRD) Support
Because Custom Resources (CR) are not native Kubernetes objects, they are not included in the default schema.
If your CRs are present in [Datree's CRDs-catalog](https://github.com/datreeio/CRDs-catalog), you can specify this project as an additional registry to lookup:
```bash
# Look in the CRDs-catalog for the desired schema/s
$ kubeconform -schema-location default -schema-location 'https://raw.githubusercontent.com/datreeio/CRDs-catalog/main/{{.Group}}/{{.ResourceKind}}_{{.ResourceAPIVersion}}.json' [MANIFEST]
```
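With this change, `sarif` becomes an accepted value for `-output`. A minimal invocation might look like the following (the manifest file name is illustrative):
```bash
# Write a SARIF 2.1.0 report to a file; -verbose also records valid resources
$ kubeconform -output sarif -verbose deployment.yml > kubeconform.sarif
```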

go.mod

@ -4,9 +4,21 @@ go 1.24
require (
github.com/hashicorp/go-retryablehttp v0.7.7
github.com/owenrumney/go-sarif/v3 v3.2.3
github.com/santhosh-tekuri/jsonschema/v6 v6.0.1
github.com/stretchr/testify v1.11.0
golang.org/x/text v0.25.0
gopkg.in/yaml.v2 v2.4.0
sigs.k8s.io/yaml v1.4.0
)
require github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
require (
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/google/uuid v1.6.0 // indirect
github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f // indirect
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
github.com/xeipuuv/gojsonschema v1.2.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
)

go.sum

@ -1,9 +1,14 @@
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dlclark/regexp2 v1.11.0 h1:G/nrcoOa7ZXlpoa/91N3X7mM3r8eIlMBBJZvsz/mxKI=
github.com/dlclark/regexp2 v1.11.0/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8=
github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM=
github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE=
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ=
github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48=
github.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB11/k=
@ -14,13 +19,31 @@ github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxec
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/owenrumney/go-sarif/v3 v3.2.3 h1:n6mdX5ugKwCrZInvBsf6WumXmpAe3mbmQXgkXlIq34U=
github.com/owenrumney/go-sarif/v3 v3.2.3/go.mod h1:1bV7t8SZg7pX41spaDkEUs8/yEjzk9JapztMoX1XNjg=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/santhosh-tekuri/jsonschema/v6 v6.0.1 h1:PKK9DyHxif4LZo+uQSgXNqs0jj5+xZwwfKHgph2lxBw=
github.com/santhosh-tekuri/jsonschema/v6 v6.0.1/go.mod h1:JXeL+ps8p7/KNMjDQk3TCwPpBy0wYklyWTfbkIzdIFU=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.11.0 h1:ib4sjIrwZKxE5u/Japgo/7SJV3PvgjGiRNAvTVGqQl8=
github.com/stretchr/testify v1.11.0/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f h1:J9EGpcZtP0E/raorCMxlFGSTBrsSlaDGf3jU/qvAE2c=
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0=
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ=
github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74=
github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y=
golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y=
golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/text v0.25.0 h1:qVyWApTSYLk/drJRO5mDlNYskwQznZmkpV2c8q9zls4=
golang.org/x/text v0.25.0/go.mod h1:WEdwpYrmk1qmdHvhkSTNPm3app7v4rsT8F2UD6+VHIA=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=
sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY=


@ -14,6 +14,8 @@ type Output interface {
func New(w io.Writer, outputFormat string, printSummary, isStdin, verbose bool) (Output, error) {
switch {
case outputFormat == "sarif":
return sarifOutput(w, printSummary, isStdin, verbose), nil
case outputFormat == "json":
return jsonOutput(w, printSummary, isStdin, verbose), nil
case outputFormat == "junit":
@ -25,6 +27,6 @@ func New(w io.Writer, outputFormat string, printSummary, isStdin, verbose bool)
case outputFormat == "text":
return textOutput(w, printSummary, isStdin, verbose), nil
default:
return nil, fmt.Errorf("'outputFormat' must be 'json', 'junit', 'pretty', 'tap' or 'text'")
return nil, fmt.Errorf("'outputFormat' must be 'json', 'junit', 'pretty', 'tap', 'sarif' or 'text'")
}
}
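For programmatic use, the same factory now accepts `"sarif"`. A minimal sketch, assuming the import paths follow the module layout shown above (`github.com/yannh/kubeconform/pkg/...`) and using an illustrative manifest:
```go
package main

import (
	"log"
	"os"

	"github.com/yannh/kubeconform/pkg/output"
	"github.com/yannh/kubeconform/pkg/resource"
	"github.com/yannh/kubeconform/pkg/validator"
)

func main() {
	// "sarif" joins json, junit, pretty, tap and text as an accepted format.
	out, err := output.New(os.Stdout, "sarif", false, false, true)
	if err != nil {
		log.Fatal(err)
	}

	// An illustrative invalid result, mirroring the shape used in the tests below.
	res := validator.Result{
		Resource: resource.Resource{
			Path:  "deployment.yml",
			Bytes: []byte("apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: my-app\n"),
		},
		Status: validator.Invalid,
		ValidationErrors: []validator.ValidationError{
			{Path: "spec.replicas", Msg: "Invalid type. Expected: [integer,null], given: string"},
		},
	}

	if err := out.Write(res); err != nil {
		log.Fatal(err)
	}
	// Flush assembles the SARIF report and writes it to the configured writer.
	if err := out.Flush(); err != nil {
		log.Fatal(err)
	}
}
```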

pkg/output/sarif.go (new file)

@ -0,0 +1,205 @@
package output
import (
"fmt"
"io"
"sync"
"github.com/owenrumney/go-sarif/v3/pkg/report"
"github.com/owenrumney/go-sarif/v3/pkg/report/v210/sarif"
"github.com/yannh/kubeconform/pkg/resource"
"github.com/yannh/kubeconform/pkg/validator"
)
const (
toolName = "kubeconform"
toolInfoURI = "https://github.com/yannh/kubeconform"
)
const (
ruleIDValid = "KUBE-VALID"
ruleIDInvalid = "KUBE-INVALID"
ruleIDError = "KUBE-ERROR"
ruleIDSkipped = "KUBE-SKIPPED"
)
const (
levelNote = "note"
levelError = "error"
)
var sarifReportingDescriptors = []*sarif.ReportingDescriptor{
newSarifReportingDescriptor(ruleIDValid, "ValidResource", "Resource is valid.", levelNote),
newSarifReportingDescriptor(ruleIDInvalid, "InvalidResource", "Resource is invalid against schema.", levelError),
newSarifReportingDescriptor(ruleIDError, "ProcessingError", "Error processing resource.", levelError),
newSarifReportingDescriptor(ruleIDSkipped, "SkippedResource", "Resource validation was skipped.", levelNote),
}
// newSarifReportingDescriptor creates a new SARIF reporting descriptor.
func newSarifReportingDescriptor(id, name, shortDesc, level string) *sarif.ReportingDescriptor {
shortDescMsg := sarif.NewMultiformatMessageString().WithText(shortDesc)
return sarif.NewRule(id).
WithName(name).
WithShortDescription(shortDescMsg).
WithDefaultConfiguration(sarif.NewReportingConfiguration().WithLevel(level))
}
// sarifOutputter handles the generation of SARIF format output.
// It implements the Output interface and is concurrency-safe.
type sarifOutputter struct {
mu sync.Mutex
writer io.Writer
verbose bool
results []*sarif.Result
}
// sarifOutput creates a new Outputter that formats results as SARIF.
func sarifOutput(writer io.Writer, withSummary, isStdin, verbose bool) Output {
return &sarifOutputter{
writer: writer,
verbose: verbose,
results: make([]*sarif.Result, 0),
}
}
// newSarifRun creates and initializes a new SARIF report and run
// with the standard tool and rule information.
func newSarifRun() (*sarif.Report, *sarif.Run, error) {
rep := report.NewV210Report()
if rep == nil {
return nil, nil, fmt.Errorf("failed to initialize SARIF report")
}
run := sarif.NewRunWithInformationURI(toolName, toolInfoURI)
if run == nil {
return nil, nil, fmt.Errorf("failed to initialize SARIF run")
}
if run.Tool == nil || run.Tool.Driver == nil {
return nil, nil, fmt.Errorf("SARIF run is missing required tool driver information")
}
run.Tool.Driver.WithRules(sarifReportingDescriptors)
rep.AddRun(run)
return rep, run, nil
}
// Write processes a single validation result.
// It is concurrency-safe.
func (so *sarifOutputter) Write(validationResult validator.Result) error {
so.mu.Lock()
defer so.mu.Unlock()
if validationResult.Status == validator.Empty {
return nil
}
if validationResult.Status == validator.Valid && !so.verbose {
return nil
}
signature, _ := validationResult.Resource.Signature()
if validationResult.Status == validator.Invalid && len(validationResult.ValidationErrors) > 0 {
for _, valErr := range validationResult.ValidationErrors {
sarifResult := so.newSarifResult(validationResult, signature, &valErr)
so.results = append(so.results, sarifResult)
}
} else {
sarifResult := so.newSarifResult(validationResult, signature, nil)
so.results = append(so.results, sarifResult)
}
return nil
}
// newSarifResult creates a SARIF result from a validation result.
// If valErr is provided, it populates the result with specific validation
// failure details, including the logical path.
func (so *sarifOutputter) newSarifResult(res validator.Result, sig *resource.Signature, valErr *validator.ValidationError) *sarif.Result {
result := sarif.NewResult().
AddLocation(
sarif.NewLocationWithPhysicalLocation(
sarif.NewPhysicalLocation().
WithArtifactLocation(
sarif.NewSimpleArtifactLocation(res.Resource.Path),
),
),
)
if valErr != nil {
result.Locations[0].AddLogicalLocation(
sarif.NewLogicalLocation().WithName(valErr.Path),
)
result.
WithRuleID(ruleIDInvalid).
WithLevel(levelError)
var message string
if sig.Kind != "" && sig.Name != "" {
message = fmt.Sprintf("%s %s is invalid: %s: %s", sig.Kind, sig.Name, valErr.Path, valErr.Msg)
} else {
message = fmt.Sprintf("%s is invalid: %s: %s", res.Resource.Path, valErr.Path, valErr.Msg)
}
result.WithMessage(sarif.NewTextMessage(message))
} else {
switch res.Status {
case validator.Valid:
result.
WithRuleID(ruleIDValid).
WithLevel(levelNote).
WithMessage(sarif.NewTextMessage(
fmt.Sprintf("%s %s is valid", sig.Kind, sig.Name),
))
case validator.Error:
result.
WithRuleID(ruleIDError).
WithLevel(levelError)
var message string
if sig.Kind != "" && sig.Name != "" {
message = fmt.Sprintf("%s %s failed validation: %s", sig.Kind, sig.Name, res.Err.Error())
} else {
message = fmt.Sprintf("%s failed validation: %s", res.Resource.Path, res.Err.Error())
}
result.WithMessage(sarif.NewTextMessage(message))
case validator.Skipped:
result.
WithRuleID(ruleIDSkipped).
WithLevel(levelNote).
WithMessage(sarif.NewTextMessage(
fmt.Sprintf("%s %s skipped", sig.Kind, sig.Name),
))
default:
result.
WithRuleID(ruleIDError).
WithLevel(levelError).
WithMessage(sarif.NewTextMessage(
fmt.Sprintf("Unknown validation status for %s", res.Resource.Path),
))
}
}
return result
}
// Flush generates the complete SARIF report and writes it to the output writer.
// It is concurrency-safe.
func (so *sarifOutputter) Flush() error {
so.mu.Lock()
defer so.mu.Unlock()
rep, run, err := newSarifRun()
if err != nil {
return err
}
for _, result := range so.results {
run.AddResult(result)
}
if err := rep.PrettyWrite(so.writer); err != nil {
return fmt.Errorf("failed to write SARIF report: %w", err)
}
return nil
}

pkg/output/sarif_test.go (new file)

@ -0,0 +1,319 @@
package output
import (
"bytes"
"encoding/json"
"errors"
"testing"
"github.com/owenrumney/go-sarif/v3/pkg/report/v210/sarif"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/yannh/kubeconform/pkg/resource"
"github.com/yannh/kubeconform/pkg/validator"
"gopkg.in/yaml.v2"
)
type testMetadata struct {
Name string `yaml:"name"`
}
type testResource struct {
APIVersion string `yaml:"apiVersion"`
Kind string `yaml:"kind"`
Metadata testMetadata `yaml:"metadata"`
}
func marshalTestResource(t *testing.T, apiVersion, kind, name string) []byte {
res := testResource{
APIVersion: apiVersion,
Kind: kind,
Metadata: testMetadata{Name: name},
}
bytes, err := yaml.Marshal(res)
require.NoError(t, err)
return bytes
}
// newExpectedReport creates a complete, initialized SARIF report
// for use in test assertions.
func newExpectedReport(t *testing.T, results []*sarif.Result) *sarif.Report {
report, run, err := newSarifRun()
require.NoError(t, err)
if results != nil {
run.Results = results
}
report.InlineExternalProperties = nil
report.Properties.Tags = nil
run.Artifacts = nil
for _, rule := range run.Tool.Driver.Rules {
rule.DeprecatedGuids = nil
rule.DeprecatedIds = nil
rule.DeprecatedNames = nil
}
return report
}
func newExpectedResult(ruleID, level, message, path string, logicalPath string) *sarif.Result {
result := sarif.NewResult().
WithRuleID(ruleID).
WithLevel(level).
WithMessage(sarif.NewTextMessage(message))
location := sarif.NewLocationWithPhysicalLocation(
sarif.NewPhysicalLocation().
WithArtifactLocation(
sarif.NewSimpleArtifactLocation(path),
),
)
if logicalPath != "" {
location.AddLogicalLocation(sarif.NewLogicalLocation().WithName(logicalPath))
}
result.AddLocation(location)
result.Suppressions = nil
result.WorkItemUris = nil
return result
}
func TestSarifWrite(t *testing.T) {
testCases := []struct {
name string
verbose bool
results []validator.Result
expectedReport *sarif.Report
}{
{
name: "single invalid deployment",
verbose: false,
results: []validator.Result{
{
Resource: resource.Resource{
Path: "deployment.yml",
Bytes: marshalTestResource(t, "apps/v1", "Deployment", "my-app"),
},
Status: validator.Invalid,
Err: errors.New("spec.replicas: Invalid type. Expected: [integer,null], given: string"),
ValidationErrors: []validator.ValidationError{
{Path: "spec.replicas", Msg: "Invalid type. Expected: [integer,null], given: string"},
},
},
},
expectedReport: newExpectedReport(t, []*sarif.Result{
newExpectedResult(
ruleIDInvalid,
levelError,
"Deployment my-app is invalid: spec.replicas: Invalid type. Expected: [integer,null], given: string",
"deployment.yml",
"spec.replicas",
),
}),
},
{
name: "single valid deployment verbose",
verbose: true,
results: []validator.Result{
{
Resource: resource.Resource{
Path: "deployment.yml",
Bytes: marshalTestResource(t, "apps/v1", "Deployment", "my-app"),
},
Status: validator.Valid,
Err: nil,
},
},
expectedReport: newExpectedReport(t, []*sarif.Result{
newExpectedResult(
ruleIDValid,
levelNote,
"Deployment my-app is valid",
"deployment.yml",
"",
),
}),
},
{
name: "single valid deployment non-verbose",
verbose: false,
results: []validator.Result{
{
Resource: resource.Resource{
Path: "deployment.yml",
Bytes: marshalTestResource(t, "apps/v1", "Deployment", "my-app"),
},
Status: validator.Valid,
Err: nil,
},
},
expectedReport: newExpectedReport(t, nil),
},
{
name: "skipped resource",
verbose: true,
results: []validator.Result{
{
Resource: resource.Resource{
Path: "service.yml",
Bytes: marshalTestResource(t, "v1", "Service", "my-service"),
},
Status: validator.Skipped,
Err: nil,
},
},
expectedReport: newExpectedReport(t, []*sarif.Result{
newExpectedResult(
ruleIDSkipped,
levelNote,
"Service my-service skipped",
"service.yml",
"",
),
}),
},
{
name: "error processing resource",
verbose: false,
results: []validator.Result{
{
Resource: resource.Resource{
Path: "configmap.yml",
Bytes: marshalTestResource(t, "v1", "ConfigMap", "my-config"),
},
Status: validator.Error,
Err: errors.New("failed to download schema"),
},
},
expectedReport: newExpectedReport(t, []*sarif.Result{
newExpectedResult(
ruleIDError,
levelError,
"ConfigMap my-config failed validation: failed to download schema",
"configmap.yml",
"",
),
}),
},
{
name: "empty resource filtered out",
verbose: true,
results: []validator.Result{
{
Resource: resource.Resource{
Path: "empty.yml",
Bytes: []byte(`---`),
},
Status: validator.Empty,
Err: nil,
},
},
expectedReport: newExpectedReport(t, nil),
},
{
name: "multiple invalid results from one file",
verbose: false,
results: []validator.Result{
{
Resource: resource.Resource{
Path: "deployment1.yml",
Bytes: marshalTestResource(t, "apps/v1", "Deployment", "app1"),
},
Status: validator.Invalid,
ValidationErrors: []validator.ValidationError{
{Path: "spec.template", Msg: "is missing"},
{Path: "spec.selector", Msg: "is missing"},
},
},
{
Resource: resource.Resource{
Path: "deployment2.yml",
Bytes: marshalTestResource(t, "apps/v1", "Deployment", "app2"),
},
Status: validator.Invalid,
ValidationErrors: []validator.ValidationError{
{Path: "spec.replicas", Msg: "must be positive"},
},
},
},
expectedReport: newExpectedReport(t, []*sarif.Result{
newExpectedResult(
ruleIDInvalid,
levelError,
"Deployment app1 is invalid: spec.template: is missing",
"deployment1.yml",
"spec.template",
),
newExpectedResult(
ruleIDInvalid,
levelError,
"Deployment app1 is invalid: spec.selector: is missing",
"deployment1.yml",
"spec.selector",
),
newExpectedResult(
ruleIDInvalid,
levelError,
"Deployment app2 is invalid: spec.replicas: must be positive",
"deployment2.yml",
"spec.replicas",
),
}),
},
{
name: "invalid resource with no signature",
verbose: false,
results: []validator.Result{
{
Resource: resource.Resource{
Path: "not-yaml.yml",
Bytes: []byte(`not: valid: yaml`),
},
Status: validator.Invalid,
ValidationErrors: []validator.ValidationError{
{Path: "metadata", Msg: "is missing"},
},
},
},
expectedReport: newExpectedReport(t, []*sarif.Result{
newExpectedResult(
ruleIDInvalid,
levelError,
"not-yaml.yml is invalid: metadata: is missing",
"not-yaml.yml",
"metadata",
),
}),
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
w := new(bytes.Buffer)
o := sarifOutput(w, false, false, tc.verbose)
for _, res := range tc.results {
err := o.Write(res)
require.NoError(t, err, "Write() should not return an error")
}
err := o.Flush()
require.NoError(t, err, "Flush() should not return an error")
var actualReport sarif.Report
err = json.Unmarshal(w.Bytes(), &actualReport)
require.NoError(t, err, "Output should be valid JSON: %s", w.String())
require.Len(t, actualReport.Runs, 1)
assert.Equal(t, tc.expectedReport.Version, actualReport.Version)
assert.Equal(t, tc.expectedReport.Schema, actualReport.Schema)
assert.Equal(t, tc.expectedReport.Properties, actualReport.Properties)
assert.Equal(t, tc.expectedReport.Runs[0].Tool, actualReport.Runs[0].Tool)
assert.ElementsMatch(t, tc.expectedReport.Runs[0].Results, actualReport.Runs[0].Results)
})
}
}
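The table-driven test above can be exercised on its own; from the repository root, something like the following should run just these cases:
```bash
# Run only the SARIF writer tests
$ go test ./pkg/output/ -run TestSarifWrite -v
```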

vendor/github.com/davecgh/go-spew/LICENSE (generated, vendored, new file)

@ -0,0 +1,15 @@
ISC License
Copyright (c) 2012-2016 Dave Collins <dave@davec.name>
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

vendor/github.com/davecgh/go-spew/spew/bypass.go (generated, vendored, new file)

@ -0,0 +1,145 @@
// Copyright (c) 2015-2016 Dave Collins <dave@davec.name>
//
// Permission to use, copy, modify, and distribute this software for any
// purpose with or without fee is hereby granted, provided that the above
// copyright notice and this permission notice appear in all copies.
//
// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
// NOTE: Due to the following build constraints, this file will only be compiled
// when the code is not running on Google App Engine, compiled by GopherJS, and
// "-tags safe" is not added to the go build command line. The "disableunsafe"
// tag is deprecated and thus should not be used.
// Go versions prior to 1.4 are disabled because they use a different layout
// for interfaces which make the implementation of unsafeReflectValue more complex.
// +build !js,!appengine,!safe,!disableunsafe,go1.4
package spew
import (
"reflect"
"unsafe"
)
const (
// UnsafeDisabled is a build-time constant which specifies whether or
// not access to the unsafe package is available.
UnsafeDisabled = false
// ptrSize is the size of a pointer on the current arch.
ptrSize = unsafe.Sizeof((*byte)(nil))
)
type flag uintptr
var (
// flagRO indicates whether the value field of a reflect.Value
// is read-only.
flagRO flag
// flagAddr indicates whether the address of the reflect.Value's
// value may be taken.
flagAddr flag
)
// flagKindMask holds the bits that make up the kind
// part of the flags field. In all the supported versions,
// it is in the lower 5 bits.
const flagKindMask = flag(0x1f)
// Different versions of Go have used different
// bit layouts for the flags type. This table
// records the known combinations.
var okFlags = []struct {
ro, addr flag
}{{
// From Go 1.4 to 1.5
ro: 1 << 5,
addr: 1 << 7,
}, {
// Up to Go tip.
ro: 1<<5 | 1<<6,
addr: 1 << 8,
}}
var flagValOffset = func() uintptr {
field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag")
if !ok {
panic("reflect.Value has no flag field")
}
return field.Offset
}()
// flagField returns a pointer to the flag field of a reflect.Value.
func flagField(v *reflect.Value) *flag {
return (*flag)(unsafe.Pointer(uintptr(unsafe.Pointer(v)) + flagValOffset))
}
// unsafeReflectValue converts the passed reflect.Value into a one that bypasses
// the typical safety restrictions preventing access to unaddressable and
// unexported data. It works by digging the raw pointer to the underlying
// value out of the protected value and generating a new unprotected (unsafe)
// reflect.Value to it.
//
// This allows us to check for implementations of the Stringer and error
// interfaces to be used for pretty printing ordinarily unaddressable and
// inaccessible values such as unexported struct fields.
func unsafeReflectValue(v reflect.Value) reflect.Value {
if !v.IsValid() || (v.CanInterface() && v.CanAddr()) {
return v
}
flagFieldPtr := flagField(&v)
*flagFieldPtr &^= flagRO
*flagFieldPtr |= flagAddr
return v
}
// Sanity checks against future reflect package changes
// to the type or semantics of the Value.flag field.
func init() {
field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag")
if !ok {
panic("reflect.Value has no flag field")
}
if field.Type.Kind() != reflect.TypeOf(flag(0)).Kind() {
panic("reflect.Value flag field has changed kind")
}
type t0 int
var t struct {
A t0
// t0 will have flagEmbedRO set.
t0
// a will have flagStickyRO set
a t0
}
vA := reflect.ValueOf(t).FieldByName("A")
va := reflect.ValueOf(t).FieldByName("a")
vt0 := reflect.ValueOf(t).FieldByName("t0")
// Infer flagRO from the difference between the flags
// for the (otherwise identical) fields in t.
flagPublic := *flagField(&vA)
flagWithRO := *flagField(&va) | *flagField(&vt0)
flagRO = flagPublic ^ flagWithRO
// Infer flagAddr from the difference between a value
// taken from a pointer and not.
vPtrA := reflect.ValueOf(&t).Elem().FieldByName("A")
flagNoPtr := *flagField(&vA)
flagPtr := *flagField(&vPtrA)
flagAddr = flagNoPtr ^ flagPtr
// Check that the inferred flags tally with one of the known versions.
for _, f := range okFlags {
if flagRO == f.ro && flagAddr == f.addr {
return
}
}
panic("reflect.Value read-only flag has changed semantics")
}

vendor/github.com/davecgh/go-spew/spew/bypasssafe.go (generated, vendored, new file)

@ -0,0 +1,38 @@
// Copyright (c) 2015-2016 Dave Collins <dave@davec.name>
//
// Permission to use, copy, modify, and distribute this software for any
// purpose with or without fee is hereby granted, provided that the above
// copyright notice and this permission notice appear in all copies.
//
// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
// NOTE: Due to the following build constraints, this file will only be compiled
// when the code is running on Google App Engine, compiled by GopherJS, or
// "-tags safe" is added to the go build command line. The "disableunsafe"
// tag is deprecated and thus should not be used.
// +build js appengine safe disableunsafe !go1.4
package spew
import "reflect"
const (
// UnsafeDisabled is a build-time constant which specifies whether or
// not access to the unsafe package is available.
UnsafeDisabled = true
)
// unsafeReflectValue typically converts the passed reflect.Value into a one
// that bypasses the typical safety restrictions preventing access to
// unaddressable and unexported data. However, doing this relies on access to
// the unsafe package. This is a stub version which simply returns the passed
// reflect.Value when the unsafe package is not available.
func unsafeReflectValue(v reflect.Value) reflect.Value {
return v
}

vendor/github.com/davecgh/go-spew/spew/common.go (generated, vendored, new file)

@ -0,0 +1,341 @@
/*
* Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
package spew
import (
"bytes"
"fmt"
"io"
"reflect"
"sort"
"strconv"
)
// Some constants in the form of bytes to avoid string overhead. This mirrors
// the technique used in the fmt package.
var (
panicBytes = []byte("(PANIC=")
plusBytes = []byte("+")
iBytes = []byte("i")
trueBytes = []byte("true")
falseBytes = []byte("false")
interfaceBytes = []byte("(interface {})")
commaNewlineBytes = []byte(",\n")
newlineBytes = []byte("\n")
openBraceBytes = []byte("{")
openBraceNewlineBytes = []byte("{\n")
closeBraceBytes = []byte("}")
asteriskBytes = []byte("*")
colonBytes = []byte(":")
colonSpaceBytes = []byte(": ")
openParenBytes = []byte("(")
closeParenBytes = []byte(")")
spaceBytes = []byte(" ")
pointerChainBytes = []byte("->")
nilAngleBytes = []byte("<nil>")
maxNewlineBytes = []byte("<max depth reached>\n")
maxShortBytes = []byte("<max>")
circularBytes = []byte("<already shown>")
circularShortBytes = []byte("<shown>")
invalidAngleBytes = []byte("<invalid>")
openBracketBytes = []byte("[")
closeBracketBytes = []byte("]")
percentBytes = []byte("%")
precisionBytes = []byte(".")
openAngleBytes = []byte("<")
closeAngleBytes = []byte(">")
openMapBytes = []byte("map[")
closeMapBytes = []byte("]")
lenEqualsBytes = []byte("len=")
capEqualsBytes = []byte("cap=")
)
// hexDigits is used to map a decimal value to a hex digit.
var hexDigits = "0123456789abcdef"
// catchPanic handles any panics that might occur during the handleMethods
// calls.
func catchPanic(w io.Writer, v reflect.Value) {
if err := recover(); err != nil {
w.Write(panicBytes)
fmt.Fprintf(w, "%v", err)
w.Write(closeParenBytes)
}
}
// handleMethods attempts to call the Error and String methods on the underlying
// type the passed reflect.Value represents and outputes the result to Writer w.
//
// It handles panics in any called methods by catching and displaying the error
// as the formatted value.
func handleMethods(cs *ConfigState, w io.Writer, v reflect.Value) (handled bool) {
// We need an interface to check if the type implements the error or
// Stringer interface. However, the reflect package won't give us an
// interface on certain things like unexported struct fields in order
// to enforce visibility rules. We use unsafe, when it's available,
// to bypass these restrictions since this package does not mutate the
// values.
if !v.CanInterface() {
if UnsafeDisabled {
return false
}
v = unsafeReflectValue(v)
}
// Choose whether or not to do error and Stringer interface lookups against
// the base type or a pointer to the base type depending on settings.
// Technically calling one of these methods with a pointer receiver can
// mutate the value, however, types which choose to satisify an error or
// Stringer interface with a pointer receiver should not be mutating their
// state inside these interface methods.
if !cs.DisablePointerMethods && !UnsafeDisabled && !v.CanAddr() {
v = unsafeReflectValue(v)
}
if v.CanAddr() {
v = v.Addr()
}
// Is it an error or Stringer?
switch iface := v.Interface().(type) {
case error:
defer catchPanic(w, v)
if cs.ContinueOnMethod {
w.Write(openParenBytes)
w.Write([]byte(iface.Error()))
w.Write(closeParenBytes)
w.Write(spaceBytes)
return false
}
w.Write([]byte(iface.Error()))
return true
case fmt.Stringer:
defer catchPanic(w, v)
if cs.ContinueOnMethod {
w.Write(openParenBytes)
w.Write([]byte(iface.String()))
w.Write(closeParenBytes)
w.Write(spaceBytes)
return false
}
w.Write([]byte(iface.String()))
return true
}
return false
}
// printBool outputs a boolean value as true or false to Writer w.
func printBool(w io.Writer, val bool) {
if val {
w.Write(trueBytes)
} else {
w.Write(falseBytes)
}
}
// printInt outputs a signed integer value to Writer w.
func printInt(w io.Writer, val int64, base int) {
w.Write([]byte(strconv.FormatInt(val, base)))
}
// printUint outputs an unsigned integer value to Writer w.
func printUint(w io.Writer, val uint64, base int) {
w.Write([]byte(strconv.FormatUint(val, base)))
}
// printFloat outputs a floating point value using the specified precision,
// which is expected to be 32 or 64bit, to Writer w.
func printFloat(w io.Writer, val float64, precision int) {
w.Write([]byte(strconv.FormatFloat(val, 'g', -1, precision)))
}
// printComplex outputs a complex value using the specified float precision
// for the real and imaginary parts to Writer w.
func printComplex(w io.Writer, c complex128, floatPrecision int) {
r := real(c)
w.Write(openParenBytes)
w.Write([]byte(strconv.FormatFloat(r, 'g', -1, floatPrecision)))
i := imag(c)
if i >= 0 {
w.Write(plusBytes)
}
w.Write([]byte(strconv.FormatFloat(i, 'g', -1, floatPrecision)))
w.Write(iBytes)
w.Write(closeParenBytes)
}
// printHexPtr outputs a uintptr formatted as hexadecimal with a leading '0x'
// prefix to Writer w.
func printHexPtr(w io.Writer, p uintptr) {
// Null pointer.
num := uint64(p)
if num == 0 {
w.Write(nilAngleBytes)
return
}
// Max uint64 is 16 bytes in hex + 2 bytes for '0x' prefix
buf := make([]byte, 18)
// It's simpler to construct the hex string right to left.
base := uint64(16)
i := len(buf) - 1
for num >= base {
buf[i] = hexDigits[num%base]
num /= base
i--
}
buf[i] = hexDigits[num]
// Add '0x' prefix.
i--
buf[i] = 'x'
i--
buf[i] = '0'
// Strip unused leading bytes.
buf = buf[i:]
w.Write(buf)
}
// valuesSorter implements sort.Interface to allow a slice of reflect.Value
// elements to be sorted.
type valuesSorter struct {
values []reflect.Value
strings []string // either nil or same len and values
cs *ConfigState
}
// newValuesSorter initializes a valuesSorter instance, which holds a set of
// surrogate keys on which the data should be sorted. It uses flags in
// ConfigState to decide if and how to populate those surrogate keys.
func newValuesSorter(values []reflect.Value, cs *ConfigState) sort.Interface {
vs := &valuesSorter{values: values, cs: cs}
if canSortSimply(vs.values[0].Kind()) {
return vs
}
if !cs.DisableMethods {
vs.strings = make([]string, len(values))
for i := range vs.values {
b := bytes.Buffer{}
if !handleMethods(cs, &b, vs.values[i]) {
vs.strings = nil
break
}
vs.strings[i] = b.String()
}
}
if vs.strings == nil && cs.SpewKeys {
vs.strings = make([]string, len(values))
for i := range vs.values {
vs.strings[i] = Sprintf("%#v", vs.values[i].Interface())
}
}
return vs
}
// canSortSimply tests whether a reflect.Kind is a primitive that can be sorted
// directly, or whether it should be considered for sorting by surrogate keys
// (if the ConfigState allows it).
func canSortSimply(kind reflect.Kind) bool {
// This switch parallels valueSortLess, except for the default case.
switch kind {
case reflect.Bool:
return true
case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
return true
case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
return true
case reflect.Float32, reflect.Float64:
return true
case reflect.String:
return true
case reflect.Uintptr:
return true
case reflect.Array:
return true
}
return false
}
// Len returns the number of values in the slice. It is part of the
// sort.Interface implementation.
func (s *valuesSorter) Len() int {
return len(s.values)
}
// Swap swaps the values at the passed indices. It is part of the
// sort.Interface implementation.
func (s *valuesSorter) Swap(i, j int) {
s.values[i], s.values[j] = s.values[j], s.values[i]
if s.strings != nil {
s.strings[i], s.strings[j] = s.strings[j], s.strings[i]
}
}
// valueSortLess returns whether the first value should sort before the second
// value. It is used by valueSorter.Less as part of the sort.Interface
// implementation.
func valueSortLess(a, b reflect.Value) bool {
switch a.Kind() {
case reflect.Bool:
return !a.Bool() && b.Bool()
case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
return a.Int() < b.Int()
case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
return a.Uint() < b.Uint()
case reflect.Float32, reflect.Float64:
return a.Float() < b.Float()
case reflect.String:
return a.String() < b.String()
case reflect.Uintptr:
return a.Uint() < b.Uint()
case reflect.Array:
// Compare the contents of both arrays.
l := a.Len()
for i := 0; i < l; i++ {
av := a.Index(i)
bv := b.Index(i)
if av.Interface() == bv.Interface() {
continue
}
return valueSortLess(av, bv)
}
}
return a.String() < b.String()
}
// Less returns whether the value at index i should sort before the
// value at index j. It is part of the sort.Interface implementation.
func (s *valuesSorter) Less(i, j int) bool {
if s.strings == nil {
return valueSortLess(s.values[i], s.values[j])
}
return s.strings[i] < s.strings[j]
}
// sortValues is a sort function that handles both native types and any type that
// can be converted to error or Stringer. Other inputs are sorted according to
// their Value.String() value to ensure display stability.
func sortValues(values []reflect.Value, cs *ConfigState) {
if len(values) == 0 {
return
}
sort.Sort(newValuesSorter(values, cs))
}

vendor/github.com/davecgh/go-spew/spew/config.go (generated, vendored, new file)

@ -0,0 +1,306 @@
/*
* Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
package spew
import (
"bytes"
"fmt"
"io"
"os"
)
// ConfigState houses the configuration options used by spew to format and
// display values. There is a global instance, Config, that is used to control
// all top-level Formatter and Dump functionality. Each ConfigState instance
// provides methods equivalent to the top-level functions.
//
// The zero value for ConfigState provides no indentation. You would typically
// want to set it to a space or a tab.
//
// Alternatively, you can use NewDefaultConfig to get a ConfigState instance
// with default settings. See the documentation of NewDefaultConfig for default
// values.
type ConfigState struct {
// Indent specifies the string to use for each indentation level. The
// global config instance that all top-level functions use set this to a
// single space by default. If you would like more indentation, you might
// set this to a tab with "\t" or perhaps two spaces with " ".
Indent string
// MaxDepth controls the maximum number of levels to descend into nested
// data structures. The default, 0, means there is no limit.
//
// NOTE: Circular data structures are properly detected, so it is not
// necessary to set this value unless you specifically want to limit deeply
// nested data structures.
MaxDepth int
// DisableMethods specifies whether or not error and Stringer interfaces are
// invoked for types that implement them.
DisableMethods bool
// DisablePointerMethods specifies whether or not to check for and invoke
// error and Stringer interfaces on types which only accept a pointer
// receiver when the current type is not a pointer.
//
// NOTE: This might be an unsafe action since calling one of these methods
// with a pointer receiver could technically mutate the value, however,
// in practice, types which choose to satisify an error or Stringer
// interface with a pointer receiver should not be mutating their state
// inside these interface methods. As a result, this option relies on
// access to the unsafe package, so it will not have any effect when
// running in environments without access to the unsafe package such as
// Google App Engine or with the "safe" build tag specified.
DisablePointerMethods bool
// DisablePointerAddresses specifies whether to disable the printing of
// pointer addresses. This is useful when diffing data structures in tests.
DisablePointerAddresses bool
// DisableCapacities specifies whether to disable the printing of capacities
// for arrays, slices, maps and channels. This is useful when diffing
// data structures in tests.
DisableCapacities bool
// ContinueOnMethod specifies whether or not recursion should continue once
// a custom error or Stringer interface is invoked. The default, false,
// means it will print the results of invoking the custom error or Stringer
// interface and return immediately instead of continuing to recurse into
// the internals of the data type.
//
// NOTE: This flag does not have any effect if method invocation is disabled
// via the DisableMethods or DisablePointerMethods options.
ContinueOnMethod bool
// SortKeys specifies map keys should be sorted before being printed. Use
// this to have a more deterministic, diffable output. Note that only
// native types (bool, int, uint, floats, uintptr and string) and types
// that support the error or Stringer interfaces (if methods are
// enabled) are supported, with other types sorted according to the
// reflect.Value.String() output which guarantees display stability.
SortKeys bool
// SpewKeys specifies that, as a last resort attempt, map keys should
// be spewed to strings and sorted by those strings. This is only
// considered if SortKeys is true.
SpewKeys bool
}
// Config is the active configuration of the top-level functions.
// The configuration can be changed by modifying the contents of spew.Config.
var Config = ConfigState{Indent: " "}
// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were
// passed with a Formatter interface returned by c.NewFormatter. It returns
// the formatted string as a value that satisfies error. See NewFormatter
// for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Errorf(format, c.NewFormatter(a), c.NewFormatter(b))
func (c *ConfigState) Errorf(format string, a ...interface{}) (err error) {
return fmt.Errorf(format, c.convertArgs(a)...)
}
// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were
// passed with a Formatter interface returned by c.NewFormatter. It returns
// the number of bytes written and any write error encountered. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Fprint(w, c.NewFormatter(a), c.NewFormatter(b))
func (c *ConfigState) Fprint(w io.Writer, a ...interface{}) (n int, err error) {
return fmt.Fprint(w, c.convertArgs(a)...)
}
// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were
// passed with a Formatter interface returned by c.NewFormatter. It returns
// the number of bytes written and any write error encountered. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Fprintf(w, format, c.NewFormatter(a), c.NewFormatter(b))
func (c *ConfigState) Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) {
return fmt.Fprintf(w, format, c.convertArgs(a)...)
}
// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it
// passed with a Formatter interface returned by c.NewFormatter. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Fprintln(w, c.NewFormatter(a), c.NewFormatter(b))
func (c *ConfigState) Fprintln(w io.Writer, a ...interface{}) (n int, err error) {
return fmt.Fprintln(w, c.convertArgs(a)...)
}
// Print is a wrapper for fmt.Print that treats each argument as if it were
// passed with a Formatter interface returned by c.NewFormatter. It returns
// the number of bytes written and any write error encountered. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Print(c.NewFormatter(a), c.NewFormatter(b))
func (c *ConfigState) Print(a ...interface{}) (n int, err error) {
return fmt.Print(c.convertArgs(a)...)
}
// Printf is a wrapper for fmt.Printf that treats each argument as if it were
// passed with a Formatter interface returned by c.NewFormatter. It returns
// the number of bytes written and any write error encountered. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Printf(format, c.NewFormatter(a), c.NewFormatter(b))
func (c *ConfigState) Printf(format string, a ...interface{}) (n int, err error) {
return fmt.Printf(format, c.convertArgs(a)...)
}
// Println is a wrapper for fmt.Println that treats each argument as if it were
// passed with a Formatter interface returned by c.NewFormatter. It returns
// the number of bytes written and any write error encountered. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Println(c.NewFormatter(a), c.NewFormatter(b))
func (c *ConfigState) Println(a ...interface{}) (n int, err error) {
return fmt.Println(c.convertArgs(a)...)
}
// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were
// passed with a Formatter interface returned by c.NewFormatter. It returns
// the resulting string. See NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Sprint(c.NewFormatter(a), c.NewFormatter(b))
func (c *ConfigState) Sprint(a ...interface{}) string {
return fmt.Sprint(c.convertArgs(a)...)
}
// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were
// passed with a Formatter interface returned by c.NewFormatter. It returns
// the resulting string. See NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Sprintf(format, c.NewFormatter(a), c.NewFormatter(b))
func (c *ConfigState) Sprintf(format string, a ...interface{}) string {
return fmt.Sprintf(format, c.convertArgs(a)...)
}
// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it
// were passed with a Formatter interface returned by c.NewFormatter. It
// returns the resulting string. See NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Sprintln(c.NewFormatter(a), c.NewFormatter(b))
func (c *ConfigState) Sprintln(a ...interface{}) string {
return fmt.Sprintln(c.convertArgs(a)...)
}
/*
NewFormatter returns a custom formatter that satisfies the fmt.Formatter
interface. As a result, it integrates cleanly with standard fmt package
printing functions. The formatter is useful for inline printing of smaller data
types similar to the standard %v format specifier.
The custom formatter only responds to the %v (most compact), %+v (adds pointer
addresses), %#v (adds types), and %#+v (adds types and pointer addresses) verb
combinations. Any other verbs such as %x and %q will be sent to the the
standard fmt package for formatting. In addition, the custom formatter ignores
the width and precision arguments (however they will still work on the format
specifiers not handled by the custom formatter).
Typically this function shouldn't be called directly. It is much easier to make
use of the custom formatter by calling one of the convenience functions such as
c.Printf, c.Println, or c.Printf.
*/
func (c *ConfigState) NewFormatter(v interface{}) fmt.Formatter {
return newFormatter(c, v)
}
// Fdump formats and displays the passed arguments to io.Writer w. It formats
// exactly the same as Dump.
func (c *ConfigState) Fdump(w io.Writer, a ...interface{}) {
fdump(c, w, a...)
}
/*
Dump displays the passed parameters to standard out with newlines, customizable
indentation, and additional debug information such as complete types and all
pointer addresses used to indirect to the final value. It provides the
following features over the built-in printing facilities provided by the fmt
package:
* Pointers are dereferenced and followed
* Circular data structures are detected and handled properly
* Custom Stringer/error interfaces are optionally invoked, including
on unexported types
* Custom types which only implement the Stringer/error interfaces via
a pointer receiver are optionally invoked when passing non-pointer
variables
* Byte arrays and slices are dumped like the hexdump -C command which
includes offsets, byte values in hex, and ASCII output
The configuration options are controlled by modifying the public members
of c. See ConfigState for options documentation.
See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to
get the formatted result as a string.
*/
func (c *ConfigState) Dump(a ...interface{}) {
fdump(c, os.Stdout, a...)
}
// Sdump returns a string with the passed arguments formatted exactly the same
// as Dump.
func (c *ConfigState) Sdump(a ...interface{}) string {
var buf bytes.Buffer
fdump(c, &buf, a...)
return buf.String()
}
// convertArgs accepts a slice of arguments and returns a slice of the same
// length with each argument converted to a spew Formatter interface using
// the ConfigState associated with s.
func (c *ConfigState) convertArgs(args []interface{}) (formatters []interface{}) {
formatters = make([]interface{}, len(args))
for index, arg := range args {
formatters[index] = newFormatter(c, arg)
}
return formatters
}
// NewDefaultConfig returns a ConfigState with the following default settings.
//
// Indent: " "
// MaxDepth: 0
// DisableMethods: false
// DisablePointerMethods: false
// ContinueOnMethod: false
// SortKeys: false
func NewDefaultConfig() *ConfigState {
return &ConfigState{Indent: " "}
}
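As an aside (an editorial sketch, not part of the vendored file), the per-instance configuration described above might be used like this, e.g. to get deterministic output for test diffs:
```go
package main

import (
	"fmt"

	"github.com/davecgh/go-spew/spew"
)

func main() {
	// A ConfigState tuned for stable, diff-friendly output.
	cfg := spew.ConfigState{
		Indent:                  "  ",
		SortKeys:                true,
		DisablePointerAddresses: true,
		DisableCapacities:       true,
	}
	fmt.Print(cfg.Sdump(map[string]int{"b": 2, "a": 1}))
}
```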

vendor/github.com/davecgh/go-spew/spew/doc.go (generated, vendored, new file)

@ -0,0 +1,211 @@
/*
* Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
/*
Package spew implements a deep pretty printer for Go data structures to aid in
debugging.
A quick overview of the additional features spew provides over the built-in
printing facilities for Go data types are as follows:
* Pointers are dereferenced and followed
* Circular data structures are detected and handled properly
* Custom Stringer/error interfaces are optionally invoked, including
on unexported types
* Custom types which only implement the Stringer/error interfaces via
a pointer receiver are optionally invoked when passing non-pointer
variables
* Byte arrays and slices are dumped like the hexdump -C command which
includes offsets, byte values in hex, and ASCII output (only when using
Dump style)
There are two different approaches spew allows for dumping Go data structures:
* Dump style which prints with newlines, customizable indentation,
and additional debug information such as types and all pointer addresses
used to indirect to the final value
* A custom Formatter interface that integrates cleanly with the standard fmt
package and replaces %v, %+v, %#v, and %#+v to provide inline printing
similar to the default %v while providing the additional functionality
outlined above and passing unsupported format verbs such as %x and %q
along to fmt
Quick Start
This section demonstrates how to quickly get started with spew. See the
sections below for further details on formatting and configuration options.
To dump a variable with full newlines, indentation, type, and pointer
information use Dump, Fdump, or Sdump:
spew.Dump(myVar1, myVar2, ...)
spew.Fdump(someWriter, myVar1, myVar2, ...)
str := spew.Sdump(myVar1, myVar2, ...)
Alternatively, if you would prefer to use format strings with a compacted inline
printing style, use the convenience wrappers Printf, Fprintf, etc with
%v (most compact), %+v (adds pointer addresses), %#v (adds types), or
%#+v (adds types and pointer addresses):
spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
spew.Fprintf(someWriter, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
spew.Fprintf(someWriter, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
Configuration Options
Configuration of spew is handled by fields in the ConfigState type. For
convenience, all of the top-level functions use a global state available
via the spew.Config global.
It is also possible to create a ConfigState instance that provides methods
equivalent to the top-level functions. This allows concurrent configuration
options. See the ConfigState documentation for more details.
The following configuration options are available:
* Indent
String to use for each indentation level for Dump functions.
It is a single space by default. A popular alternative is "\t".
* MaxDepth
Maximum number of levels to descend into nested data structures.
There is no limit by default.
* DisableMethods
Disables invocation of error and Stringer interface methods.
Method invocation is enabled by default.
* DisablePointerMethods
Disables invocation of error and Stringer interface methods on types
which only accept pointer receivers from non-pointer variables.
Pointer method invocation is enabled by default.
* DisablePointerAddresses
DisablePointerAddresses specifies whether to disable the printing of
pointer addresses. This is useful when diffing data structures in tests.
* DisableCapacities
DisableCapacities specifies whether to disable the printing of
capacities for arrays, slices, maps and channels. This is useful when
diffing data structures in tests.
* ContinueOnMethod
Enables recursion into types after invoking error and Stringer interface
methods. Recursion after method invocation is disabled by default.
* SortKeys
Specifies map keys should be sorted before being printed. Use
this to have a more deterministic, diffable output. Note that
only native types (bool, int, uint, floats, uintptr and string)
and types which implement error or Stringer interfaces are
supported with other types sorted according to the
reflect.Value.String() output which guarantees display
stability. Natural map order is used by default.
* SpewKeys
Specifies that, as a last resort attempt, map keys should be
spewed to strings and sorted by those strings. This is only
considered if SortKeys is true.
Dump Usage
Simply call spew.Dump with a list of variables you want to dump:
spew.Dump(myVar1, myVar2, ...)
You may also call spew.Fdump if you would prefer to output to an arbitrary
io.Writer. For example, to dump to standard error:
spew.Fdump(os.Stderr, myVar1, myVar2, ...)
A third option is to call spew.Sdump to get the formatted output as a string:
str := spew.Sdump(myVar1, myVar2, ...)
Sample Dump Output
See the Dump example for details on the setup of the types and variables being
shown here.
(main.Foo) {
unexportedField: (*main.Bar)(0xf84002e210)({
flag: (main.Flag) flagTwo,
data: (uintptr) <nil>
}),
ExportedField: (map[interface {}]interface {}) (len=1) {
(string) (len=3) "one": (bool) true
}
}
Byte (and uint8) arrays and slices are displayed uniquely like the hexdump -C
command as shown.
([]uint8) (len=32 cap=32) {
00000000 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20 |............... |
00000010 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30 |!"#$%&'()*+,-./0|
00000020 31 32 |12|
}
Custom Formatter
Spew provides a custom formatter that implements the fmt.Formatter interface
so that it integrates cleanly with standard fmt package printing functions. The
formatter is useful for inline printing of smaller data types similar to the
standard %v format specifier.
The custom formatter only responds to the %v (most compact), %+v (adds pointer
addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb
combinations. Any other verbs such as %x and %q will be sent to the
standard fmt package for formatting. In addition, the custom formatter ignores
the width and precision arguments (however they will still work on the format
specifiers not handled by the custom formatter).
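Because the formatter satisfies fmt.Formatter, it can also be handed straight to the standard fmt package; a brief hedged sketch (myVar1 and myVar2 are placeholder variables), not a prescribed pattern:
fmt.Printf("%+v\n", spew.NewFormatter(myVar1))
fmt.Fprintf(os.Stderr, "%#v\n", spew.NewFormatter(myVar2))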
Custom Formatter Usage
The simplest way to make use of the spew custom formatter is to call one of the
convenience functions such as spew.Printf, spew.Println, or spew.Fprintf. The
functions have syntax you are most likely already familiar with:
spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
spew.Println(myVar, myVar2)
spew.Fprintf(os.Stderr, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
spew.Fprintf(os.Stderr, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
See the Index for the full list of convenience functions.
Sample Formatter Output
Double pointer to a uint8:
%v: <**>5
%+v: <**>(0xf8400420d0->0xf8400420c8)5
%#v: (**uint8)5
%#+v: (**uint8)(0xf8400420d0->0xf8400420c8)5
Pointer to circular struct with a uint8 field and a pointer to itself:
%v: <*>{1 <*><shown>}
%+v: <*>(0xf84003e260){ui8:1 c:<*>(0xf84003e260)<shown>}
%#v: (*main.circular){ui8:(uint8)1 c:(*main.circular)<shown>}
%#+v: (*main.circular)(0xf84003e260){ui8:(uint8)1 c:(*main.circular)(0xf84003e260)<shown>}
See the Printf example for details on the setup of variables being shown
here.
Errors
Since it is possible for custom Stringer/error interfaces to panic, spew
detects them and handles them internally by printing the panic information
inline with the output. Since spew is intended to provide deep pretty printing
capabilities on structures, it intentionally does not return any errors.
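As a small illustration (the type below is hypothetical and the panic annotation spew prints is paraphrased, not quoted), a panicking Stringer does not abort the dump:
type badStringer struct{}
func (badStringer) String() string { panic("boom") } // hypothetical example type
spew.Dump(badStringer{}) // the panic is reported inline and dumping continues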
*/
package spew

509
vendor/github.com/davecgh/go-spew/spew/dump.go generated vendored Normal file
View file

@@ -0,0 +1,509 @@
/*
* Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
package spew
import (
"bytes"
"encoding/hex"
"fmt"
"io"
"os"
"reflect"
"regexp"
"strconv"
"strings"
)
var (
// uint8Type is a reflect.Type representing a uint8. It is used to
// convert cgo types to uint8 slices for hexdumping.
uint8Type = reflect.TypeOf(uint8(0))
// cCharRE is a regular expression that matches a cgo char.
// It is used to detect character arrays to hexdump them.
cCharRE = regexp.MustCompile(`^.*\._Ctype_char$`)
// cUnsignedCharRE is a regular expression that matches a cgo unsigned
// char. It is used to detect unsigned character arrays to hexdump
// them.
cUnsignedCharRE = regexp.MustCompile(`^.*\._Ctype_unsignedchar$`)
// cUint8tCharRE is a regular expression that matches a cgo uint8_t.
// It is used to detect uint8_t arrays to hexdump them.
cUint8tCharRE = regexp.MustCompile(`^.*\._Ctype_uint8_t$`)
)
// dumpState contains information about the state of a dump operation.
type dumpState struct {
w io.Writer
depth int
pointers map[uintptr]int
ignoreNextType bool
ignoreNextIndent bool
cs *ConfigState
}
// indent performs indentation according to the depth level and cs.Indent
// option.
func (d *dumpState) indent() {
if d.ignoreNextIndent {
d.ignoreNextIndent = false
return
}
d.w.Write(bytes.Repeat([]byte(d.cs.Indent), d.depth))
}
// unpackValue returns values inside of non-nil interfaces when possible.
// This is useful for data types like structs, arrays, slices, and maps which
// can contain varying types packed inside an interface.
func (d *dumpState) unpackValue(v reflect.Value) reflect.Value {
if v.Kind() == reflect.Interface && !v.IsNil() {
v = v.Elem()
}
return v
}
// dumpPtr handles formatting of pointers by indirecting them as necessary.
func (d *dumpState) dumpPtr(v reflect.Value) {
// Remove pointers at or below the current depth from map used to detect
// circular refs.
for k, depth := range d.pointers {
if depth >= d.depth {
delete(d.pointers, k)
}
}
// Keep list of all dereferenced pointers to show later.
pointerChain := make([]uintptr, 0)
// Figure out how many levels of indirection there are by dereferencing
// pointers and unpacking interfaces down the chain while detecting circular
// references.
nilFound := false
cycleFound := false
indirects := 0
ve := v
for ve.Kind() == reflect.Ptr {
if ve.IsNil() {
nilFound = true
break
}
indirects++
addr := ve.Pointer()
pointerChain = append(pointerChain, addr)
if pd, ok := d.pointers[addr]; ok && pd < d.depth {
cycleFound = true
indirects--
break
}
d.pointers[addr] = d.depth
ve = ve.Elem()
if ve.Kind() == reflect.Interface {
if ve.IsNil() {
nilFound = true
break
}
ve = ve.Elem()
}
}
// Display type information.
d.w.Write(openParenBytes)
d.w.Write(bytes.Repeat(asteriskBytes, indirects))
d.w.Write([]byte(ve.Type().String()))
d.w.Write(closeParenBytes)
// Display pointer information.
if !d.cs.DisablePointerAddresses && len(pointerChain) > 0 {
d.w.Write(openParenBytes)
for i, addr := range pointerChain {
if i > 0 {
d.w.Write(pointerChainBytes)
}
printHexPtr(d.w, addr)
}
d.w.Write(closeParenBytes)
}
// Display dereferenced value.
d.w.Write(openParenBytes)
switch {
case nilFound:
d.w.Write(nilAngleBytes)
case cycleFound:
d.w.Write(circularBytes)
default:
d.ignoreNextType = true
d.dump(ve)
}
d.w.Write(closeParenBytes)
}
// dumpSlice handles formatting of arrays and slices. Byte (uint8 under
// reflection) arrays and slices are dumped in hexdump -C fashion.
func (d *dumpState) dumpSlice(v reflect.Value) {
// Determine whether this type should be hex dumped or not. Also,
// for types which should be hexdumped, try to use the underlying data
// first, then fall back to trying to convert them to a uint8 slice.
var buf []uint8
doConvert := false
doHexDump := false
numEntries := v.Len()
if numEntries > 0 {
vt := v.Index(0).Type()
vts := vt.String()
switch {
// C types that need to be converted.
case cCharRE.MatchString(vts):
fallthrough
case cUnsignedCharRE.MatchString(vts):
fallthrough
case cUint8tCharRE.MatchString(vts):
doConvert = true
// Try to use existing uint8 slices and fall back to converting
// and copying if that fails.
case vt.Kind() == reflect.Uint8:
// We need an addressable interface to convert the type
// to a byte slice. However, the reflect package won't
// give us an interface on certain things like
// unexported struct fields in order to enforce
// visibility rules. We use unsafe, when available, to
// bypass these restrictions since this package does not
// mutate the values.
vs := v
if !vs.CanInterface() || !vs.CanAddr() {
vs = unsafeReflectValue(vs)
}
if !UnsafeDisabled {
vs = vs.Slice(0, numEntries)
// Use the existing uint8 slice if it can be
// type asserted.
iface := vs.Interface()
if slice, ok := iface.([]uint8); ok {
buf = slice
doHexDump = true
break
}
}
// The underlying data needs to be converted if it can't
// be type asserted to a uint8 slice.
doConvert = true
}
// Copy and convert the underlying type if needed.
if doConvert && vt.ConvertibleTo(uint8Type) {
// Convert and copy each element into a uint8 byte
// slice.
buf = make([]uint8, numEntries)
for i := 0; i < numEntries; i++ {
vv := v.Index(i)
buf[i] = uint8(vv.Convert(uint8Type).Uint())
}
doHexDump = true
}
}
// Hexdump the entire slice as needed.
if doHexDump {
indent := strings.Repeat(d.cs.Indent, d.depth)
str := indent + hex.Dump(buf)
str = strings.Replace(str, "\n", "\n"+indent, -1)
str = strings.TrimRight(str, d.cs.Indent)
d.w.Write([]byte(str))
return
}
// Recursively call dump for each item.
for i := 0; i < numEntries; i++ {
d.dump(d.unpackValue(v.Index(i)))
if i < (numEntries - 1) {
d.w.Write(commaNewlineBytes)
} else {
d.w.Write(newlineBytes)
}
}
}
// dump is the main workhorse for dumping a value. It uses the passed reflect
// value to figure out what kind of object we are dealing with and formats it
// appropriately. It is a recursive function, however circular data structures
// are detected and handled properly.
func (d *dumpState) dump(v reflect.Value) {
// Handle invalid reflect values immediately.
kind := v.Kind()
if kind == reflect.Invalid {
d.w.Write(invalidAngleBytes)
return
}
// Handle pointers specially.
if kind == reflect.Ptr {
d.indent()
d.dumpPtr(v)
return
}
// Print type information unless already handled elsewhere.
if !d.ignoreNextType {
d.indent()
d.w.Write(openParenBytes)
d.w.Write([]byte(v.Type().String()))
d.w.Write(closeParenBytes)
d.w.Write(spaceBytes)
}
d.ignoreNextType = false
// Display length and capacity if the built-in len and cap functions
// work with the value's kind and the len/cap itself is non-zero.
valueLen, valueCap := 0, 0
switch v.Kind() {
case reflect.Array, reflect.Slice, reflect.Chan:
valueLen, valueCap = v.Len(), v.Cap()
case reflect.Map, reflect.String:
valueLen = v.Len()
}
if valueLen != 0 || !d.cs.DisableCapacities && valueCap != 0 {
d.w.Write(openParenBytes)
if valueLen != 0 {
d.w.Write(lenEqualsBytes)
printInt(d.w, int64(valueLen), 10)
}
if !d.cs.DisableCapacities && valueCap != 0 {
if valueLen != 0 {
d.w.Write(spaceBytes)
}
d.w.Write(capEqualsBytes)
printInt(d.w, int64(valueCap), 10)
}
d.w.Write(closeParenBytes)
d.w.Write(spaceBytes)
}
// Call Stringer/error interfaces if they exist and the handle methods flag
// is enabled
if !d.cs.DisableMethods {
if (kind != reflect.Invalid) && (kind != reflect.Interface) {
if handled := handleMethods(d.cs, d.w, v); handled {
return
}
}
}
switch kind {
case reflect.Invalid:
// Do nothing. We should never get here since invalid has already
// been handled above.
case reflect.Bool:
printBool(d.w, v.Bool())
case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
printInt(d.w, v.Int(), 10)
case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
printUint(d.w, v.Uint(), 10)
case reflect.Float32:
printFloat(d.w, v.Float(), 32)
case reflect.Float64:
printFloat(d.w, v.Float(), 64)
case reflect.Complex64:
printComplex(d.w, v.Complex(), 32)
case reflect.Complex128:
printComplex(d.w, v.Complex(), 64)
case reflect.Slice:
if v.IsNil() {
d.w.Write(nilAngleBytes)
break
}
fallthrough
case reflect.Array:
d.w.Write(openBraceNewlineBytes)
d.depth++
if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
d.indent()
d.w.Write(maxNewlineBytes)
} else {
d.dumpSlice(v)
}
d.depth--
d.indent()
d.w.Write(closeBraceBytes)
case reflect.String:
d.w.Write([]byte(strconv.Quote(v.String())))
case reflect.Interface:
// The only time we should get here is for nil interfaces due to
// unpackValue calls.
if v.IsNil() {
d.w.Write(nilAngleBytes)
}
case reflect.Ptr:
// Do nothing. We should never get here since pointers have already
// been handled above.
case reflect.Map:
// nil maps should be indicated as different than empty maps
if v.IsNil() {
d.w.Write(nilAngleBytes)
break
}
d.w.Write(openBraceNewlineBytes)
d.depth++
if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
d.indent()
d.w.Write(maxNewlineBytes)
} else {
numEntries := v.Len()
keys := v.MapKeys()
if d.cs.SortKeys {
sortValues(keys, d.cs)
}
for i, key := range keys {
d.dump(d.unpackValue(key))
d.w.Write(colonSpaceBytes)
d.ignoreNextIndent = true
d.dump(d.unpackValue(v.MapIndex(key)))
if i < (numEntries - 1) {
d.w.Write(commaNewlineBytes)
} else {
d.w.Write(newlineBytes)
}
}
}
d.depth--
d.indent()
d.w.Write(closeBraceBytes)
case reflect.Struct:
d.w.Write(openBraceNewlineBytes)
d.depth++
if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
d.indent()
d.w.Write(maxNewlineBytes)
} else {
vt := v.Type()
numFields := v.NumField()
for i := 0; i < numFields; i++ {
d.indent()
vtf := vt.Field(i)
d.w.Write([]byte(vtf.Name))
d.w.Write(colonSpaceBytes)
d.ignoreNextIndent = true
d.dump(d.unpackValue(v.Field(i)))
if i < (numFields - 1) {
d.w.Write(commaNewlineBytes)
} else {
d.w.Write(newlineBytes)
}
}
}
d.depth--
d.indent()
d.w.Write(closeBraceBytes)
case reflect.Uintptr:
printHexPtr(d.w, uintptr(v.Uint()))
case reflect.UnsafePointer, reflect.Chan, reflect.Func:
printHexPtr(d.w, v.Pointer())
// There were not any other types at the time this code was written, but
// fall back to letting the default fmt package handle it in case any new
// types are added.
default:
if v.CanInterface() {
fmt.Fprintf(d.w, "%v", v.Interface())
} else {
fmt.Fprintf(d.w, "%v", v.String())
}
}
}
// fdump is a helper function to consolidate the logic from the various public
// methods which take varying writers and config states.
func fdump(cs *ConfigState, w io.Writer, a ...interface{}) {
for _, arg := range a {
if arg == nil {
w.Write(interfaceBytes)
w.Write(spaceBytes)
w.Write(nilAngleBytes)
w.Write(newlineBytes)
continue
}
d := dumpState{w: w, cs: cs}
d.pointers = make(map[uintptr]int)
d.dump(reflect.ValueOf(arg))
d.w.Write(newlineBytes)
}
}
// Fdump formats and displays the passed arguments to io.Writer w. It formats
// exactly the same as Dump.
func Fdump(w io.Writer, a ...interface{}) {
fdump(&Config, w, a...)
}
// Sdump returns a string with the passed arguments formatted exactly the same
// as Dump.
func Sdump(a ...interface{}) string {
var buf bytes.Buffer
fdump(&Config, &buf, a...)
return buf.String()
}
/*
Dump displays the passed parameters to standard out with newlines, customizable
indentation, and additional debug information such as complete types and all
pointer addresses used to indirect to the final value. It provides the
following features over the built-in printing facilities provided by the fmt
package:
* Pointers are dereferenced and followed
* Circular data structures are detected and handled properly
* Custom Stringer/error interfaces are optionally invoked, including
on unexported types
* Custom types which only implement the Stringer/error interfaces via
a pointer receiver are optionally invoked when passing non-pointer
variables
* Byte arrays and slices are dumped like the hexdump -C command which
includes offsets, byte values in hex, and ASCII output
The configuration options are controlled by an exported package global,
spew.Config. See ConfigState for options documentation.
See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to
get the formatted result as a string.
*/
func Dump(a ...interface{}) {
fdump(&Config, os.Stdout, a...)
}

419
vendor/github.com/davecgh/go-spew/spew/format.go generated vendored Normal file
View file

@@ -0,0 +1,419 @@
/*
* Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
package spew
import (
"bytes"
"fmt"
"reflect"
"strconv"
"strings"
)
// supportedFlags is a list of all the character flags supported by fmt package.
const supportedFlags = "0-+# "
// formatState implements the fmt.Formatter interface and contains information
// about the state of a formatting operation. The NewFormatter function can
// be used to get a new Formatter which can be used directly as arguments
// in standard fmt package printing calls.
type formatState struct {
value interface{}
fs fmt.State
depth int
pointers map[uintptr]int
ignoreNextType bool
cs *ConfigState
}
// buildDefaultFormat recreates the original format string without precision
// and width information to pass in to fmt.Sprintf in the case of an
// unrecognized type. Unless new types are added to the language, this
// function won't ever be called.
func (f *formatState) buildDefaultFormat() (format string) {
buf := bytes.NewBuffer(percentBytes)
for _, flag := range supportedFlags {
if f.fs.Flag(int(flag)) {
buf.WriteRune(flag)
}
}
buf.WriteRune('v')
format = buf.String()
return format
}
// constructOrigFormat recreates the original format string including precision
// and width information to pass along to the standard fmt package. This allows
// automatic deferral of all format strings this package doesn't support.
func (f *formatState) constructOrigFormat(verb rune) (format string) {
buf := bytes.NewBuffer(percentBytes)
for _, flag := range supportedFlags {
if f.fs.Flag(int(flag)) {
buf.WriteRune(flag)
}
}
if width, ok := f.fs.Width(); ok {
buf.WriteString(strconv.Itoa(width))
}
if precision, ok := f.fs.Precision(); ok {
buf.Write(precisionBytes)
buf.WriteString(strconv.Itoa(precision))
}
buf.WriteRune(verb)
format = buf.String()
return format
}
// unpackValue returns values inside of non-nil interfaces when possible and
// ensures that types for values which have been unpacked from an interface
// are displayed when the show types flag is also set.
// This is useful for data types like structs, arrays, slices, and maps which
// can contain varying types packed inside an interface.
func (f *formatState) unpackValue(v reflect.Value) reflect.Value {
if v.Kind() == reflect.Interface {
f.ignoreNextType = false
if !v.IsNil() {
v = v.Elem()
}
}
return v
}
// formatPtr handles formatting of pointers by indirecting them as necessary.
func (f *formatState) formatPtr(v reflect.Value) {
// Display nil if top level pointer is nil.
showTypes := f.fs.Flag('#')
if v.IsNil() && (!showTypes || f.ignoreNextType) {
f.fs.Write(nilAngleBytes)
return
}
// Remove pointers at or below the current depth from map used to detect
// circular refs.
for k, depth := range f.pointers {
if depth >= f.depth {
delete(f.pointers, k)
}
}
// Keep list of all dereferenced pointers to possibly show later.
pointerChain := make([]uintptr, 0)
// Figure out how many levels of indirection there are by dereferencing
// pointers and unpacking interfaces down the chain while detecting circular
// references.
nilFound := false
cycleFound := false
indirects := 0
ve := v
for ve.Kind() == reflect.Ptr {
if ve.IsNil() {
nilFound = true
break
}
indirects++
addr := ve.Pointer()
pointerChain = append(pointerChain, addr)
if pd, ok := f.pointers[addr]; ok && pd < f.depth {
cycleFound = true
indirects--
break
}
f.pointers[addr] = f.depth
ve = ve.Elem()
if ve.Kind() == reflect.Interface {
if ve.IsNil() {
nilFound = true
break
}
ve = ve.Elem()
}
}
// Display type or indirection level depending on flags.
if showTypes && !f.ignoreNextType {
f.fs.Write(openParenBytes)
f.fs.Write(bytes.Repeat(asteriskBytes, indirects))
f.fs.Write([]byte(ve.Type().String()))
f.fs.Write(closeParenBytes)
} else {
if nilFound || cycleFound {
indirects += strings.Count(ve.Type().String(), "*")
}
f.fs.Write(openAngleBytes)
f.fs.Write([]byte(strings.Repeat("*", indirects)))
f.fs.Write(closeAngleBytes)
}
// Display pointer information depending on flags.
if f.fs.Flag('+') && (len(pointerChain) > 0) {
f.fs.Write(openParenBytes)
for i, addr := range pointerChain {
if i > 0 {
f.fs.Write(pointerChainBytes)
}
printHexPtr(f.fs, addr)
}
f.fs.Write(closeParenBytes)
}
// Display dereferenced value.
switch {
case nilFound:
f.fs.Write(nilAngleBytes)
case cycleFound:
f.fs.Write(circularShortBytes)
default:
f.ignoreNextType = true
f.format(ve)
}
}
// format is the main workhorse for providing the Formatter interface. It
// uses the passed reflect value to figure out what kind of object we are
// dealing with and formats it appropriately. It is a recursive function,
// however circular data structures are detected and handled properly.
func (f *formatState) format(v reflect.Value) {
// Handle invalid reflect values immediately.
kind := v.Kind()
if kind == reflect.Invalid {
f.fs.Write(invalidAngleBytes)
return
}
// Handle pointers specially.
if kind == reflect.Ptr {
f.formatPtr(v)
return
}
// Print type information unless already handled elsewhere.
if !f.ignoreNextType && f.fs.Flag('#') {
f.fs.Write(openParenBytes)
f.fs.Write([]byte(v.Type().String()))
f.fs.Write(closeParenBytes)
}
f.ignoreNextType = false
// Call Stringer/error interfaces if they exist and the handle methods
// flag is enabled.
if !f.cs.DisableMethods {
if (kind != reflect.Invalid) && (kind != reflect.Interface) {
if handled := handleMethods(f.cs, f.fs, v); handled {
return
}
}
}
switch kind {
case reflect.Invalid:
// Do nothing. We should never get here since invalid has already
// been handled above.
case reflect.Bool:
printBool(f.fs, v.Bool())
case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
printInt(f.fs, v.Int(), 10)
case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
printUint(f.fs, v.Uint(), 10)
case reflect.Float32:
printFloat(f.fs, v.Float(), 32)
case reflect.Float64:
printFloat(f.fs, v.Float(), 64)
case reflect.Complex64:
printComplex(f.fs, v.Complex(), 32)
case reflect.Complex128:
printComplex(f.fs, v.Complex(), 64)
case reflect.Slice:
if v.IsNil() {
f.fs.Write(nilAngleBytes)
break
}
fallthrough
case reflect.Array:
f.fs.Write(openBracketBytes)
f.depth++
if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
f.fs.Write(maxShortBytes)
} else {
numEntries := v.Len()
for i := 0; i < numEntries; i++ {
if i > 0 {
f.fs.Write(spaceBytes)
}
f.ignoreNextType = true
f.format(f.unpackValue(v.Index(i)))
}
}
f.depth--
f.fs.Write(closeBracketBytes)
case reflect.String:
f.fs.Write([]byte(v.String()))
case reflect.Interface:
// The only time we should get here is for nil interfaces due to
// unpackValue calls.
if v.IsNil() {
f.fs.Write(nilAngleBytes)
}
case reflect.Ptr:
// Do nothing. We should never get here since pointers have already
// been handled above.
case reflect.Map:
// nil maps should be indicated as different than empty maps
if v.IsNil() {
f.fs.Write(nilAngleBytes)
break
}
f.fs.Write(openMapBytes)
f.depth++
if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
f.fs.Write(maxShortBytes)
} else {
keys := v.MapKeys()
if f.cs.SortKeys {
sortValues(keys, f.cs)
}
for i, key := range keys {
if i > 0 {
f.fs.Write(spaceBytes)
}
f.ignoreNextType = true
f.format(f.unpackValue(key))
f.fs.Write(colonBytes)
f.ignoreNextType = true
f.format(f.unpackValue(v.MapIndex(key)))
}
}
f.depth--
f.fs.Write(closeMapBytes)
case reflect.Struct:
numFields := v.NumField()
f.fs.Write(openBraceBytes)
f.depth++
if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
f.fs.Write(maxShortBytes)
} else {
vt := v.Type()
for i := 0; i < numFields; i++ {
if i > 0 {
f.fs.Write(spaceBytes)
}
vtf := vt.Field(i)
if f.fs.Flag('+') || f.fs.Flag('#') {
f.fs.Write([]byte(vtf.Name))
f.fs.Write(colonBytes)
}
f.format(f.unpackValue(v.Field(i)))
}
}
f.depth--
f.fs.Write(closeBraceBytes)
case reflect.Uintptr:
printHexPtr(f.fs, uintptr(v.Uint()))
case reflect.UnsafePointer, reflect.Chan, reflect.Func:
printHexPtr(f.fs, v.Pointer())
// There were not any other types at the time this code was written, but
// fall back to letting the default fmt package handle it if any get added.
default:
format := f.buildDefaultFormat()
if v.CanInterface() {
fmt.Fprintf(f.fs, format, v.Interface())
} else {
fmt.Fprintf(f.fs, format, v.String())
}
}
}
// Format satisfies the fmt.Formatter interface. See NewFormatter for usage
// details.
func (f *formatState) Format(fs fmt.State, verb rune) {
f.fs = fs
// Use standard formatting for verbs that are not v.
if verb != 'v' {
format := f.constructOrigFormat(verb)
fmt.Fprintf(fs, format, f.value)
return
}
if f.value == nil {
if fs.Flag('#') {
fs.Write(interfaceBytes)
}
fs.Write(nilAngleBytes)
return
}
f.format(reflect.ValueOf(f.value))
}
// newFormatter is a helper function to consolidate the logic from the various
// public methods which take varying config states.
func newFormatter(cs *ConfigState, v interface{}) fmt.Formatter {
fs := &formatState{value: v, cs: cs}
fs.pointers = make(map[uintptr]int)
return fs
}
/*
NewFormatter returns a custom formatter that satisfies the fmt.Formatter
interface. As a result, it integrates cleanly with standard fmt package
printing functions. The formatter is useful for inline printing of smaller data
types similar to the standard %v format specifier.
The custom formatter only responds to the %v (most compact), %+v (adds pointer
addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb
combinations. Any other verbs such as %x and %q will be sent to the
standard fmt package for formatting. In addition, the custom formatter ignores
the width and precision arguments (however they will still work on the format
specifiers not handled by the custom formatter).
Typically this function shouldn't be called directly. It is much easier to make
use of the custom formatter by calling one of the convenience functions such as
Printf, Println, or Fprintf.
*/
func NewFormatter(v interface{}) fmt.Formatter {
return newFormatter(&Config, v)
}

148
vendor/github.com/davecgh/go-spew/spew/spew.go generated vendored Normal file
View file

@@ -0,0 +1,148 @@
/*
* Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
package spew
import (
"fmt"
"io"
)
// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were
// passed with a default Formatter interface returned by NewFormatter. It
// returns the formatted string as a value that satisfies error. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Errorf(format, spew.NewFormatter(a), spew.NewFormatter(b))
func Errorf(format string, a ...interface{}) (err error) {
return fmt.Errorf(format, convertArgs(a)...)
}
// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were
// passed with a default Formatter interface returned by NewFormatter. It
// returns the number of bytes written and any write error encountered. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Fprint(w, spew.NewFormatter(a), spew.NewFormatter(b))
func Fprint(w io.Writer, a ...interface{}) (n int, err error) {
return fmt.Fprint(w, convertArgs(a)...)
}
// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were
// passed with a default Formatter interface returned by NewFormatter. It
// returns the number of bytes written and any write error encountered. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Fprintf(w, format, spew.NewFormatter(a), spew.NewFormatter(b))
func Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) {
return fmt.Fprintf(w, format, convertArgs(a)...)
}
// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it
// were passed with a default Formatter interface returned by NewFormatter. It
// returns the number of bytes written and any write error encountered. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Fprintln(w, spew.NewFormatter(a), spew.NewFormatter(b))
func Fprintln(w io.Writer, a ...interface{}) (n int, err error) {
return fmt.Fprintln(w, convertArgs(a)...)
}
// Print is a wrapper for fmt.Print that treats each argument as if it were
// passed with a default Formatter interface returned by NewFormatter. It
// returns the number of bytes written and any write error encountered. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Print(spew.NewFormatter(a), spew.NewFormatter(b))
func Print(a ...interface{}) (n int, err error) {
return fmt.Print(convertArgs(a)...)
}
// Printf is a wrapper for fmt.Printf that treats each argument as if it were
// passed with a default Formatter interface returned by NewFormatter. It
// returns the number of bytes written and any write error encountered. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Printf(format, spew.NewFormatter(a), spew.NewFormatter(b))
func Printf(format string, a ...interface{}) (n int, err error) {
return fmt.Printf(format, convertArgs(a)...)
}
// Println is a wrapper for fmt.Println that treats each argument as if it were
// passed with a default Formatter interface returned by NewFormatter. It
// returns the number of bytes written and any write error encountered. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Println(spew.NewFormatter(a), spew.NewFormatter(b))
func Println(a ...interface{}) (n int, err error) {
return fmt.Println(convertArgs(a)...)
}
// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were
// passed with a default Formatter interface returned by NewFormatter. It
// returns the resulting string. See NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Sprint(spew.NewFormatter(a), spew.NewFormatter(b))
func Sprint(a ...interface{}) string {
return fmt.Sprint(convertArgs(a)...)
}
// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were
// passed with a default Formatter interface returned by NewFormatter. It
// returns the resulting string. See NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Sprintf(format, spew.NewFormatter(a), spew.NewFormatter(b))
func Sprintf(format string, a ...interface{}) string {
return fmt.Sprintf(format, convertArgs(a)...)
}
// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it
// were passed with a default Formatter interface returned by NewFormatter. It
// returns the resulting string. See NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Sprintln(spew.NewFormatter(a), spew.NewFormatter(b))
func Sprintln(a ...interface{}) string {
return fmt.Sprintln(convertArgs(a)...)
}
// convertArgs accepts a slice of arguments and returns a slice of the same
// length with each argument converted to a default spew Formatter interface.
func convertArgs(args []interface{}) (formatters []interface{}) {
formatters = make([]interface{}, len(args))
for index, arg := range args {
formatters[index] = NewFormatter(arg)
}
return formatters
}

41
vendor/github.com/google/uuid/CHANGELOG.md generated vendored Normal file
View file

@@ -0,0 +1,41 @@
# Changelog
## [1.6.0](https://github.com/google/uuid/compare/v1.5.0...v1.6.0) (2024-01-16)
### Features
* add Max UUID constant ([#149](https://github.com/google/uuid/issues/149)) ([c58770e](https://github.com/google/uuid/commit/c58770eb495f55fe2ced6284f93c5158a62e53e3))
### Bug Fixes
* fix typo in version 7 uuid documentation ([#153](https://github.com/google/uuid/issues/153)) ([016b199](https://github.com/google/uuid/commit/016b199544692f745ffc8867b914129ecb47ef06))
* Monotonicity in UUIDv7 ([#150](https://github.com/google/uuid/issues/150)) ([a2b2b32](https://github.com/google/uuid/commit/a2b2b32373ff0b1a312b7fdf6d38a977099698a6))
## [1.5.0](https://github.com/google/uuid/compare/v1.4.0...v1.5.0) (2023-12-12)
### Features
* Validate UUID without creating new UUID ([#141](https://github.com/google/uuid/issues/141)) ([9ee7366](https://github.com/google/uuid/commit/9ee7366e66c9ad96bab89139418a713dc584ae29))
## [1.4.0](https://github.com/google/uuid/compare/v1.3.1...v1.4.0) (2023-10-26)
### Features
* UUIDs slice type with Strings() convenience method ([#133](https://github.com/google/uuid/issues/133)) ([cd5fbbd](https://github.com/google/uuid/commit/cd5fbbdd02f3e3467ac18940e07e062be1f864b4))
### Fixes
* Clarify that Parse's job is to parse but not necessarily validate strings. (Documents current behavior)
## [1.3.1](https://github.com/google/uuid/compare/v1.3.0...v1.3.1) (2023-08-18)
### Bug Fixes
* Use .EqualFold() to parse urn prefixed UUIDs ([#118](https://github.com/google/uuid/issues/118)) ([574e687](https://github.com/google/uuid/commit/574e6874943741fb99d41764c705173ada5293f0))
## Changelog

26
vendor/github.com/google/uuid/CONTRIBUTING.md generated vendored Normal file
View file

@@ -0,0 +1,26 @@
# How to contribute
We definitely welcome patches and contribution to this project!
### Tips
Commits must be formatted according to the [Conventional Commits Specification](https://www.conventionalcommits.org).
Always try to include a test case! If it is not possible or not necessary,
please explain why in the pull request description.
### Releasing
Commits that would precipitate a SemVer change, as described in the Conventional
Commits Specification, will trigger [`release-please`](https://github.com/google-github-actions/release-please-action)
to create a release candidate pull request. Once submitted, `release-please`
will create a release.
For tips on how to work with `release-please`, see its documentation.
### Legal requirements
In order to protect both you and ourselves, you will need to sign the
[Contributor License Agreement](https://cla.developers.google.com/clas).
You may have already signed it for other Google projects.

9
vendor/github.com/google/uuid/CONTRIBUTORS generated vendored Normal file
View file

@@ -0,0 +1,9 @@
Paul Borman <borman@google.com>
bmatsuo
shawnps
theory
jboverfelt
dsymonds
cd1
wallclockbuilder
dansouza

27
vendor/github.com/google/uuid/LICENSE generated vendored Normal file
View file

@@ -0,0 +1,27 @@
Copyright (c) 2009,2014 Google Inc. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

21
vendor/github.com/google/uuid/README.md generated vendored Normal file
View file

@@ -0,0 +1,21 @@
# uuid
The uuid package generates and inspects UUIDs based on
[RFC 4122](https://datatracker.ietf.org/doc/html/rfc4122)
and DCE 1.1: Authentication and Security Services.
This package is based on the github.com/pborman/uuid package (previously named
code.google.com/p/go-uuid). It differs from these earlier packages in that
a UUID is a 16 byte array rather than a byte slice. One loss due to this
change is the ability to represent an invalid UUID (vs a NIL UUID).
###### Install
```sh
go get github.com/google/uuid
```
###### Documentation
[![Go Reference](https://pkg.go.dev/badge/github.com/google/uuid.svg)](https://pkg.go.dev/github.com/google/uuid)
Full `go doc` style documentation for the package can be viewed online without
installing this package by using the GoDoc site here:
http://pkg.go.dev/github.com/google/uuid
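###### Example
A minimal usage sketch (not part of the upstream README; shown only to illustrate the basic API):
```go
package main

import (
	"fmt"

	"github.com/google/uuid"
)

func main() {
	// Generate a random (Version 4) UUID.
	id := uuid.New()
	fmt.Println(id.String())

	// Parse an existing UUID string.
	parsed, err := uuid.Parse("6ba7b810-9dad-11d1-80b4-00c04fd430c8")
	if err != nil {
		panic(err)
	}
	fmt.Println(parsed.Version(), parsed.Variant())
}
```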

80
vendor/github.com/google/uuid/dce.go generated vendored Normal file
View file

@@ -0,0 +1,80 @@
// Copyright 2016 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package uuid
import (
"encoding/binary"
"fmt"
"os"
)
// A Domain represents a Version 2 domain
type Domain byte
// Domain constants for DCE Security (Version 2) UUIDs.
const (
Person = Domain(0)
Group = Domain(1)
Org = Domain(2)
)
// NewDCESecurity returns a DCE Security (Version 2) UUID.
//
// The domain should be one of Person, Group or Org.
// On a POSIX system the id should be the user's UID for the Person
// domain and the user's GID for the Group. The meaning of id for
// the domain Org or on non-POSIX systems is site defined.
//
// For a given domain/id pair the same token may be returned for up to
// 7 minutes and 10 seconds.
func NewDCESecurity(domain Domain, id uint32) (UUID, error) {
uuid, err := NewUUID()
if err == nil {
uuid[6] = (uuid[6] & 0x0f) | 0x20 // Version 2
uuid[9] = byte(domain)
binary.BigEndian.PutUint32(uuid[0:], id)
}
return uuid, err
}
// NewDCEPerson returns a DCE Security (Version 2) UUID in the person
// domain with the id returned by os.Getuid.
//
// NewDCESecurity(Person, uint32(os.Getuid()))
func NewDCEPerson() (UUID, error) {
return NewDCESecurity(Person, uint32(os.Getuid()))
}
// NewDCEGroup returns a DCE Security (Version 2) UUID in the group
// domain with the id returned by os.Getgid.
//
// NewDCESecurity(Group, uint32(os.Getgid()))
func NewDCEGroup() (UUID, error) {
return NewDCESecurity(Group, uint32(os.Getgid()))
}
// Domain returns the domain for a Version 2 UUID. Domains are only defined
// for Version 2 UUIDs.
func (uuid UUID) Domain() Domain {
return Domain(uuid[9])
}
// ID returns the id for a Version 2 UUID. IDs are only defined for Version 2
// UUIDs.
func (uuid UUID) ID() uint32 {
return binary.BigEndian.Uint32(uuid[0:4])
}
func (d Domain) String() string {
switch d {
case Person:
return "Person"
case Group:
return "Group"
case Org:
return "Org"
}
return fmt.Sprintf("Domain%d", int(d))
}

12
vendor/github.com/google/uuid/doc.go generated vendored Normal file
View file

@@ -0,0 +1,12 @@
// Copyright 2016 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package uuid generates and inspects UUIDs.
//
// UUIDs are based on RFC 4122 and DCE 1.1: Authentication and Security
// Services.
//
// A UUID is a 16 byte (128 bit) array. UUIDs may be used as keys to
// maps or compared directly.
package uuid

59
vendor/github.com/google/uuid/hash.go generated vendored Normal file
View file

@@ -0,0 +1,59 @@
// Copyright 2016 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package uuid
import (
"crypto/md5"
"crypto/sha1"
"hash"
)
// Well known namespace IDs and UUIDs
var (
NameSpaceDNS = Must(Parse("6ba7b810-9dad-11d1-80b4-00c04fd430c8"))
NameSpaceURL = Must(Parse("6ba7b811-9dad-11d1-80b4-00c04fd430c8"))
NameSpaceOID = Must(Parse("6ba7b812-9dad-11d1-80b4-00c04fd430c8"))
NameSpaceX500 = Must(Parse("6ba7b814-9dad-11d1-80b4-00c04fd430c8"))
Nil UUID // empty UUID, all zeros
// The Max UUID is a special form of UUID that is specified to have all 128 bits set to 1.
Max = UUID{
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
}
)
// NewHash returns a new UUID derived from the hash of space concatenated with
// data generated by h. The hash should be at least 16 bytes in length. The
// first 16 bytes of the hash are used to form the UUID. The version of the
// UUID will be the lower 4 bits of version. NewHash is used to implement
// NewMD5 and NewSHA1.
func NewHash(h hash.Hash, space UUID, data []byte, version int) UUID {
h.Reset()
h.Write(space[:]) //nolint:errcheck
h.Write(data) //nolint:errcheck
s := h.Sum(nil)
var uuid UUID
copy(uuid[:], s)
uuid[6] = (uuid[6] & 0x0f) | uint8((version&0xf)<<4)
uuid[8] = (uuid[8] & 0x3f) | 0x80 // RFC 4122 variant
return uuid
}
// NewMD5 returns a new MD5 (Version 3) UUID based on the
// supplied name space and data. It is the same as calling:
//
// NewHash(md5.New(), space, data, 3)
func NewMD5(space UUID, data []byte) UUID {
return NewHash(md5.New(), space, data, 3)
}
// NewSHA1 returns a new SHA1 (Version 5) UUID based on the
// supplied name space and data. It is the same as calling:
//
// NewHash(sha1.New(), space, data, 5)
func NewSHA1(space UUID, data []byte) UUID {
return NewHash(sha1.New(), space, data, 5)
}

38
vendor/github.com/google/uuid/marshal.go generated vendored Normal file
View file

@@ -0,0 +1,38 @@
// Copyright 2016 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package uuid
import "fmt"
// MarshalText implements encoding.TextMarshaler.
func (uuid UUID) MarshalText() ([]byte, error) {
var js [36]byte
encodeHex(js[:], uuid)
return js[:], nil
}
// UnmarshalText implements encoding.TextUnmarshaler.
func (uuid *UUID) UnmarshalText(data []byte) error {
id, err := ParseBytes(data)
if err != nil {
return err
}
*uuid = id
return nil
}
// MarshalBinary implements encoding.BinaryMarshaler.
func (uuid UUID) MarshalBinary() ([]byte, error) {
return uuid[:], nil
}
// UnmarshalBinary implements encoding.BinaryUnmarshaler.
func (uuid *UUID) UnmarshalBinary(data []byte) error {
if len(data) != 16 {
return fmt.Errorf("invalid UUID (got %d bytes)", len(data))
}
copy(uuid[:], data)
return nil
}

90
vendor/github.com/google/uuid/node.go generated vendored Normal file
View file

@@ -0,0 +1,90 @@
// Copyright 2016 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package uuid
import (
"sync"
)
var (
nodeMu sync.Mutex
ifname string // name of interface being used
nodeID [6]byte // hardware for version 1 UUIDs
zeroID [6]byte // nodeID with only 0's
)
// NodeInterface returns the name of the interface from which the NodeID was
// derived. The interface "user" is returned if the NodeID was set by
// SetNodeID.
func NodeInterface() string {
defer nodeMu.Unlock()
nodeMu.Lock()
return ifname
}
// SetNodeInterface selects the hardware address to be used for Version 1 UUIDs.
// If name is "" then the first usable interface found will be used or a random
// Node ID will be generated. If a named interface cannot be found then false
// is returned.
//
// SetNodeInterface never fails when name is "".
func SetNodeInterface(name string) bool {
defer nodeMu.Unlock()
nodeMu.Lock()
return setNodeInterface(name)
}
func setNodeInterface(name string) bool {
iname, addr := getHardwareInterface(name) // null implementation for js
if iname != "" && addr != nil {
ifname = iname
copy(nodeID[:], addr)
return true
}
// We found no interfaces with a valid hardware address. If name
// does not specify a specific interface generate a random Node ID
// (section 4.1.6)
if name == "" {
ifname = "random"
randomBits(nodeID[:])
return true
}
return false
}
// NodeID returns a slice of a copy of the current Node ID, setting the Node ID
// if not already set.
func NodeID() []byte {
defer nodeMu.Unlock()
nodeMu.Lock()
if nodeID == zeroID {
setNodeInterface("")
}
nid := nodeID
return nid[:]
}
// SetNodeID sets the Node ID to be used for Version 1 UUIDs. The first 6 bytes
// of id are used. If id is less than 6 bytes then false is returned and the
// Node ID is not set.
func SetNodeID(id []byte) bool {
if len(id) < 6 {
return false
}
defer nodeMu.Unlock()
nodeMu.Lock()
copy(nodeID[:], id)
ifname = "user"
return true
}
// NodeID returns the 6 byte node id encoded in uuid. It returns nil if uuid is
// not valid. The NodeID is only well defined for version 1 and 2 UUIDs.
func (uuid UUID) NodeID() []byte {
var node [6]byte
copy(node[:], uuid[10:])
return node[:]
}

12
vendor/github.com/google/uuid/node_js.go generated vendored Normal file
View file

@@ -0,0 +1,12 @@
// Copyright 2017 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build js
package uuid
// getHardwareInterface returns nil values for the JS version of the code.
// This removes the "net" dependency, because it is not used in the browser.
// Using the "net" library inflates the size of the transpiled JS code by 673k bytes.
func getHardwareInterface(name string) (string, []byte) { return "", nil }

33
vendor/github.com/google/uuid/node_net.go generated vendored Normal file
View file

@@ -0,0 +1,33 @@
// Copyright 2017 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !js
package uuid
import "net"
var interfaces []net.Interface // cached list of interfaces
// getHardwareInterface returns the name and hardware address of interface name.
// If name is "" then the name and hardware address of one of the system's
// interfaces is returned. If no interfaces are found (name does not exist or
// there are no interfaces) then "", nil is returned.
//
// Only addresses of at least 6 bytes are returned.
func getHardwareInterface(name string) (string, []byte) {
if interfaces == nil {
var err error
interfaces, err = net.Interfaces()
if err != nil {
return "", nil
}
}
for _, ifs := range interfaces {
if len(ifs.HardwareAddr) >= 6 && (name == "" || name == ifs.Name) {
return ifs.Name, ifs.HardwareAddr
}
}
return "", nil
}

118
vendor/github.com/google/uuid/null.go generated vendored Normal file
View file

@@ -0,0 +1,118 @@
// Copyright 2021 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package uuid
import (
"bytes"
"database/sql/driver"
"encoding/json"
"fmt"
)
var jsonNull = []byte("null")
// NullUUID represents a UUID that may be null.
// NullUUID implements the SQL driver.Scanner interface so
// it can be used as a scan destination:
//
// var u uuid.NullUUID
// err := db.QueryRow("SELECT name FROM foo WHERE id=?", id).Scan(&u)
// ...
// if u.Valid {
// // use u.UUID
// } else {
// // NULL value
// }
//
type NullUUID struct {
UUID UUID
Valid bool // Valid is true if UUID is not NULL
}
// Scan implements the SQL driver.Scanner interface.
func (nu *NullUUID) Scan(value interface{}) error {
if value == nil {
nu.UUID, nu.Valid = Nil, false
return nil
}
err := nu.UUID.Scan(value)
if err != nil {
nu.Valid = false
return err
}
nu.Valid = true
return nil
}
// Value implements the driver Valuer interface.
func (nu NullUUID) Value() (driver.Value, error) {
if !nu.Valid {
return nil, nil
}
// Delegate to UUID Value function
return nu.UUID.Value()
}
// MarshalBinary implements encoding.BinaryMarshaler.
func (nu NullUUID) MarshalBinary() ([]byte, error) {
if nu.Valid {
return nu.UUID[:], nil
}
return []byte(nil), nil
}
// UnmarshalBinary implements encoding.BinaryUnmarshaler.
func (nu *NullUUID) UnmarshalBinary(data []byte) error {
if len(data) != 16 {
return fmt.Errorf("invalid UUID (got %d bytes)", len(data))
}
copy(nu.UUID[:], data)
nu.Valid = true
return nil
}
// MarshalText implements encoding.TextMarshaler.
func (nu NullUUID) MarshalText() ([]byte, error) {
if nu.Valid {
return nu.UUID.MarshalText()
}
return jsonNull, nil
}
// UnmarshalText implements encoding.TextUnmarshaler.
func (nu *NullUUID) UnmarshalText(data []byte) error {
id, err := ParseBytes(data)
if err != nil {
nu.Valid = false
return err
}
nu.UUID = id
nu.Valid = true
return nil
}
// MarshalJSON implements json.Marshaler.
func (nu NullUUID) MarshalJSON() ([]byte, error) {
if nu.Valid {
return json.Marshal(nu.UUID)
}
return jsonNull, nil
}
// UnmarshalJSON implements json.Unmarshaler.
func (nu *NullUUID) UnmarshalJSON(data []byte) error {
if bytes.Equal(data, jsonNull) {
*nu = NullUUID{}
return nil // valid null UUID
}
err := json.Unmarshal(data, &nu.UUID)
nu.Valid = err == nil
return err
}

59
vendor/github.com/google/uuid/sql.go generated vendored Normal file
View file

@@ -0,0 +1,59 @@
// Copyright 2016 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package uuid
import (
"database/sql/driver"
"fmt"
)
// Scan implements sql.Scanner so UUIDs can be read from databases transparently.
// Currently, database types that map to string and []byte are supported. Please
// consult database-specific driver documentation for matching types.
func (uuid *UUID) Scan(src interface{}) error {
switch src := src.(type) {
case nil:
return nil
case string:
// if an empty UUID comes from a table, we return a null UUID
if src == "" {
return nil
}
// see Parse for required string format
u, err := Parse(src)
if err != nil {
return fmt.Errorf("Scan: %v", err)
}
*uuid = u
case []byte:
// if an empty UUID comes from a table, we return a null UUID
if len(src) == 0 {
return nil
}
// assumes a simple slice of bytes if 16 bytes
// otherwise attempts to parse
if len(src) != 16 {
return uuid.Scan(string(src))
}
copy((*uuid)[:], src)
default:
return fmt.Errorf("Scan: unable to scan type %T into UUID", src)
}
return nil
}
// Value implements sql.Valuer so that UUIDs can be written to databases
// transparently. Currently, UUIDs map to strings. Please consult
// database-specific driver documentation for matching types.
func (uuid UUID) Value() (driver.Value, error) {
return uuid.String(), nil
}

134
vendor/github.com/google/uuid/time.go generated vendored Normal file
View file

@@ -0,0 +1,134 @@
// Copyright 2016 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package uuid
import (
"encoding/binary"
"sync"
"time"
)
// A Time represents a time as the number of 100's of nanoseconds since 15 Oct
// 1582.
type Time int64
const (
lillian = 2299160 // Julian day of 15 Oct 1582
unix = 2440587 // Julian day of 1 Jan 1970
epoch = unix - lillian // Days between epochs
g1582 = epoch * 86400 // seconds between epochs
g1582ns100 = g1582 * 10000000 // 100s of nanoseconds between epochs
)
var (
timeMu sync.Mutex
lasttime uint64 // last time we returned
clockSeq uint16 // clock sequence for this run
timeNow = time.Now // for testing
)
// UnixTime converts t to the number of seconds and nanoseconds using the Unix
// epoch of 1 Jan 1970.
func (t Time) UnixTime() (sec, nsec int64) {
sec = int64(t - g1582ns100)
nsec = (sec % 10000000) * 100
sec /= 10000000
return sec, nsec
}
// GetTime returns the current Time (100s of nanoseconds since 15 Oct 1582) and
// clock sequence as well as adjusting the clock sequence as needed. An error
// is returned if the current time cannot be determined.
func GetTime() (Time, uint16, error) {
defer timeMu.Unlock()
timeMu.Lock()
return getTime()
}
func getTime() (Time, uint16, error) {
t := timeNow()
// If we don't have a clock sequence already, set one.
if clockSeq == 0 {
setClockSequence(-1)
}
now := uint64(t.UnixNano()/100) + g1582ns100
// If time has gone backwards with this clock sequence then we
// increment the clock sequence
if now <= lasttime {
clockSeq = ((clockSeq + 1) & 0x3fff) | 0x8000
}
lasttime = now
return Time(now), clockSeq, nil
}
// ClockSequence returns the current clock sequence, generating one if not
// already set. The clock sequence is only used for Version 1 UUIDs.
//
// The uuid package does not use global static storage for the clock sequence or
// the last time a UUID was generated. Unless SetClockSequence is used, a new
// random clock sequence is generated the first time a clock sequence is
// requested by ClockSequence, GetTime, or NewUUID. (section 4.2.1.1)
func ClockSequence() int {
defer timeMu.Unlock()
timeMu.Lock()
return clockSequence()
}
func clockSequence() int {
if clockSeq == 0 {
setClockSequence(-1)
}
return int(clockSeq & 0x3fff)
}
// SetClockSequence sets the clock sequence to the lower 14 bits of seq. Setting to
// -1 causes a new sequence to be generated.
func SetClockSequence(seq int) {
defer timeMu.Unlock()
timeMu.Lock()
setClockSequence(seq)
}
func setClockSequence(seq int) {
if seq == -1 {
var b [2]byte
randomBits(b[:]) // clock sequence
seq = int(b[0])<<8 | int(b[1])
}
oldSeq := clockSeq
clockSeq = uint16(seq&0x3fff) | 0x8000 // Set our variant
if oldSeq != clockSeq {
lasttime = 0
}
}
// Time returns the time in 100s of nanoseconds since 15 Oct 1582 encoded in
// uuid. The time is only defined for version 1, 2, 6 and 7 UUIDs.
func (uuid UUID) Time() Time {
var t Time
switch uuid.Version() {
case 6:
time := binary.BigEndian.Uint64(uuid[:8]) // Ignore uuid[6] version b0110
t = Time(time)
case 7:
time := binary.BigEndian.Uint64(uuid[:8])
t = Time((time>>16)*10000 + g1582ns100)
default: // forward compatible
time := int64(binary.BigEndian.Uint32(uuid[0:4]))
time |= int64(binary.BigEndian.Uint16(uuid[4:6])) << 32
time |= int64(binary.BigEndian.Uint16(uuid[6:8])&0xfff) << 48
t = Time(time)
}
return t
}
// ClockSequence returns the clock sequence encoded in uuid.
// The clock sequence is only well defined for version 1 and 2 UUIDs.
func (uuid UUID) ClockSequence() int {
return int(binary.BigEndian.Uint16(uuid[8:10])) & 0x3fff
}

43
vendor/github.com/google/uuid/util.go generated vendored Normal file
View file

@@ -0,0 +1,43 @@
// Copyright 2016 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package uuid
import (
"io"
)
// randomBits completely fills slice b with random data.
func randomBits(b []byte) {
if _, err := io.ReadFull(rander, b); err != nil {
panic(err.Error()) // rand should never fail
}
}
// xvalues returns the value of a byte as a hexadecimal digit or 255.
var xvalues = [256]byte{
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 255, 255, 255, 255, 255, 255,
255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
}
// xtob converts hex characters x1 and x2 into a byte.
func xtob(x1, x2 byte) (byte, bool) {
b1 := xvalues[x1]
b2 := xvalues[x2]
return (b1 << 4) | b2, b1 != 255 && b2 != 255
}

365
vendor/github.com/google/uuid/uuid.go generated vendored Normal file
View file

@@ -0,0 +1,365 @@
// Copyright 2018 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package uuid
import (
"bytes"
"crypto/rand"
"encoding/hex"
"errors"
"fmt"
"io"
"strings"
"sync"
)
// A UUID is a 128 bit (16 byte) Universal Unique IDentifier as defined in RFC
// 4122.
type UUID [16]byte
// A Version represents a UUID's version.
type Version byte
// A Variant represents a UUID's variant.
type Variant byte
// Constants returned by Variant.
const (
Invalid = Variant(iota) // Invalid UUID
RFC4122 // The variant specified in RFC4122
Reserved // Reserved, NCS backward compatibility.
Microsoft // Reserved, Microsoft Corporation backward compatibility.
Future // Reserved for future definition.
)
const randPoolSize = 16 * 16
var (
rander = rand.Reader // random function
poolEnabled = false
poolMu sync.Mutex
poolPos = randPoolSize // protected with poolMu
pool [randPoolSize]byte // protected with poolMu
)
type invalidLengthError struct{ len int }
func (err invalidLengthError) Error() string {
return fmt.Sprintf("invalid UUID length: %d", err.len)
}
// IsInvalidLengthError is a matcher function for the custom error type invalidLengthError.
func IsInvalidLengthError(err error) bool {
_, ok := err.(invalidLengthError)
return ok
}
// Parse decodes s into a UUID or returns an error if it cannot be parsed. Both
// the standard UUID forms defined in RFC 4122
// (xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and
// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx) are decoded. In addition,
// Parse accepts non-standard strings such as the raw hex encoding
// xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx and 38 byte "Microsoft style" encodings,
// e.g. {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx}. Only the middle 36 bytes are
// examined in the latter case. Parse should not be used to validate strings as
// it parses non-standard encodings as indicated above.
func Parse(s string) (UUID, error) {
var uuid UUID
switch len(s) {
// xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
case 36:
// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
case 36 + 9:
if !strings.EqualFold(s[:9], "urn:uuid:") {
return uuid, fmt.Errorf("invalid urn prefix: %q", s[:9])
}
s = s[9:]
// {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx}
case 36 + 2:
s = s[1:]
// xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
case 32:
var ok bool
for i := range uuid {
uuid[i], ok = xtob(s[i*2], s[i*2+1])
if !ok {
return uuid, errors.New("invalid UUID format")
}
}
return uuid, nil
default:
return uuid, invalidLengthError{len(s)}
}
// s is now at least 36 bytes long
// it must be of the form xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' {
return uuid, errors.New("invalid UUID format")
}
for i, x := range [16]int{
0, 2, 4, 6,
9, 11,
14, 16,
19, 21,
24, 26, 28, 30, 32, 34,
} {
v, ok := xtob(s[x], s[x+1])
if !ok {
return uuid, errors.New("invalid UUID format")
}
uuid[i] = v
}
return uuid, nil
}
// ParseBytes is like Parse, except it parses a byte slice instead of a string.
func ParseBytes(b []byte) (UUID, error) {
var uuid UUID
switch len(b) {
case 36: // xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
case 36 + 9: // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
if !bytes.EqualFold(b[:9], []byte("urn:uuid:")) {
return uuid, fmt.Errorf("invalid urn prefix: %q", b[:9])
}
b = b[9:]
case 36 + 2: // {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx}
b = b[1:]
case 32: // xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
var ok bool
for i := 0; i < 32; i += 2 {
uuid[i/2], ok = xtob(b[i], b[i+1])
if !ok {
return uuid, errors.New("invalid UUID format")
}
}
return uuid, nil
default:
return uuid, invalidLengthError{len(b)}
}
// s is now at least 36 bytes long
// it must be of the form xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
if b[8] != '-' || b[13] != '-' || b[18] != '-' || b[23] != '-' {
return uuid, errors.New("invalid UUID format")
}
for i, x := range [16]int{
0, 2, 4, 6,
9, 11,
14, 16,
19, 21,
24, 26, 28, 30, 32, 34,
} {
v, ok := xtob(b[x], b[x+1])
if !ok {
return uuid, errors.New("invalid UUID format")
}
uuid[i] = v
}
return uuid, nil
}
// MustParse is like Parse but panics if the string cannot be parsed.
// It simplifies safe initialization of global variables holding compiled UUIDs.
func MustParse(s string) UUID {
uuid, err := Parse(s)
if err != nil {
panic(`uuid: Parse(` + s + `): ` + err.Error())
}
return uuid
}
// FromBytes creates a new UUID from a byte slice. Returns an error if the slice
// does not have a length of 16. The bytes are copied from the slice.
func FromBytes(b []byte) (uuid UUID, err error) {
err = uuid.UnmarshalBinary(b)
return uuid, err
}
// Must returns uuid if err is nil and panics otherwise.
func Must(uuid UUID, err error) UUID {
if err != nil {
panic(err)
}
return uuid
}
// Validate returns an error if s is not a properly formatted UUID in one of the following formats:
// xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
// xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
// {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx}
// It returns an error if the format is invalid, otherwise nil.
func Validate(s string) error {
switch len(s) {
// Standard UUID format
case 36:
// UUID with "urn:uuid:" prefix
case 36 + 9:
if !strings.EqualFold(s[:9], "urn:uuid:") {
return fmt.Errorf("invalid urn prefix: %q", s[:9])
}
s = s[9:]
// UUID enclosed in braces
case 36 + 2:
if s[0] != '{' || s[len(s)-1] != '}' {
return fmt.Errorf("invalid bracketed UUID format")
}
s = s[1 : len(s)-1]
// UUID without hyphens
case 32:
for i := 0; i < len(s); i += 2 {
_, ok := xtob(s[i], s[i+1])
if !ok {
return errors.New("invalid UUID format")
}
}
default:
return invalidLengthError{len(s)}
}
// Check for standard UUID format
if len(s) == 36 {
if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' {
return errors.New("invalid UUID format")
}
for _, x := range []int{0, 2, 4, 6, 9, 11, 14, 16, 19, 21, 24, 26, 28, 30, 32, 34} {
if _, ok := xtob(s[x], s[x+1]); !ok {
return errors.New("invalid UUID format")
}
}
}
return nil
}
// String returns the string form of uuid, xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
// , or "" if uuid is invalid.
func (uuid UUID) String() string {
var buf [36]byte
encodeHex(buf[:], uuid)
return string(buf[:])
}
// URN returns the RFC 2141 URN form of uuid,
// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx, or "" if uuid is invalid.
func (uuid UUID) URN() string {
var buf [36 + 9]byte
copy(buf[:], "urn:uuid:")
encodeHex(buf[9:], uuid)
return string(buf[:])
}
func encodeHex(dst []byte, uuid UUID) {
hex.Encode(dst, uuid[:4])
dst[8] = '-'
hex.Encode(dst[9:13], uuid[4:6])
dst[13] = '-'
hex.Encode(dst[14:18], uuid[6:8])
dst[18] = '-'
hex.Encode(dst[19:23], uuid[8:10])
dst[23] = '-'
hex.Encode(dst[24:], uuid[10:])
}
// Variant returns the variant encoded in uuid.
func (uuid UUID) Variant() Variant {
switch {
case (uuid[8] & 0xc0) == 0x80:
return RFC4122
case (uuid[8] & 0xe0) == 0xc0:
return Microsoft
case (uuid[8] & 0xe0) == 0xe0:
return Future
default:
return Reserved
}
}
// Version returns the version of uuid.
func (uuid UUID) Version() Version {
return Version(uuid[6] >> 4)
}
func (v Version) String() string {
if v > 15 {
return fmt.Sprintf("BAD_VERSION_%d", v)
}
return fmt.Sprintf("VERSION_%d", v)
}
func (v Variant) String() string {
switch v {
case RFC4122:
return "RFC4122"
case Reserved:
return "Reserved"
case Microsoft:
return "Microsoft"
case Future:
return "Future"
case Invalid:
return "Invalid"
}
return fmt.Sprintf("BadVariant%d", int(v))
}
// SetRand sets the random number generator to r, which implements io.Reader.
// If r.Read returns an error when the package requests random data then
// a panic will be issued.
//
// Calling SetRand with nil sets the random number generator to the default
// generator.
func SetRand(r io.Reader) {
if r == nil {
rander = rand.Reader
return
}
rander = r
}
// EnableRandPool enables internal randomness pool used for Random
// (Version 4) UUID generation. The pool contains random bytes read from
// the random number generator on demand in batches. Enabling the pool
// may improve the UUID generation throughput significantly.
//
// Since the pool is stored on the Go heap, this feature may be a bad fit
// for security sensitive applications.
//
// Both EnableRandPool and DisableRandPool are not thread-safe and should
// only be called when there is no possibility that New or any other
// UUID Version 4 generation function will be called concurrently.
func EnableRandPool() {
poolEnabled = true
}
// DisableRandPool disables the randomness pool if it was previously
// enabled with EnableRandPool.
//
// Both EnableRandPool and DisableRandPool are not thread-safe and should
// only be called when there is no possibility that New or any other
// UUID Version 4 generation function will be called concurrently.
func DisableRandPool() {
poolEnabled = false
defer poolMu.Unlock()
poolMu.Lock()
poolPos = randPoolSize
}
// UUIDs is a slice of UUID types.
type UUIDs []UUID
// Strings returns a string slice containing the string form of each UUID in uuids.
func (uuids UUIDs) Strings() []string {
var uuidStrs = make([]string, len(uuids))
for i, uuid := range uuids {
uuidStrs[i] = uuid.String()
}
return uuidStrs
}
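Parse, Validate and String together normalize the four encodings listed in the Parse doc comment. A minimal usage sketch, assuming the vendored module is imported by its canonical path github.com/google/uuid:

```go
package main

import (
	"fmt"

	"github.com/google/uuid"
)

func main() {
	// The four accepted encodings all decode to the same canonical UUID.
	inputs := []string{
		"6ba7b810-9dad-11d1-80b4-00c04fd430c8",
		"urn:uuid:6ba7b810-9dad-11d1-80b4-00c04fd430c8",
		"{6ba7b810-9dad-11d1-80b4-00c04fd430c8}",
		"6ba7b8109dad11d180b400c04fd430c8",
	}
	for _, s := range inputs {
		u, err := uuid.Parse(s)
		fmt.Println(u.String(), err) // 6ba7b810-9dad-11d1-80b4-00c04fd430c8 <nil>
	}

	// Validate only checks the format and never returns the parsed value.
	fmt.Println(uuid.Validate("not-a-uuid")) // invalid UUID length: 10
}
```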

vendor/github.com/google/uuid/version1.go generated vendored Normal file

@ -0,0 +1,44 @@
// Copyright 2016 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package uuid
import (
"encoding/binary"
)
// NewUUID returns a Version 1 UUID based on the current NodeID and clock
// sequence, and the current time. If the NodeID has not been set by SetNodeID
// or SetNodeInterface then it will be set automatically. If the NodeID cannot
// be set NewUUID returns nil. If clock sequence has not been set by
// SetClockSequence then it will be set automatically. If GetTime fails to
// return the current time, NewUUID returns nil and an error.
//
// In most cases, New should be used.
func NewUUID() (UUID, error) {
var uuid UUID
now, seq, err := GetTime()
if err != nil {
return uuid, err
}
timeLow := uint32(now & 0xffffffff)
timeMid := uint16((now >> 32) & 0xffff)
timeHi := uint16((now >> 48) & 0x0fff)
timeHi |= 0x1000 // Version 1
binary.BigEndian.PutUint32(uuid[0:], timeLow)
binary.BigEndian.PutUint16(uuid[4:], timeMid)
binary.BigEndian.PutUint16(uuid[6:], timeHi)
binary.BigEndian.PutUint16(uuid[8:], seq)
nodeMu.Lock()
if nodeID == zeroID {
setNodeInterface("")
}
copy(uuid[10:], nodeID[:])
nodeMu.Unlock()
return uuid, nil
}
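A short, hedged sketch of generating a Version 1 UUID and reading back the fields it encodes, using only functions present in this vendor drop (NewUUID, Version, Time, ClockSequence):

```go
package main

import (
	"fmt"

	"github.com/google/uuid"
)

func main() {
	u, err := uuid.NewUUID()
	if err != nil {
		panic(err)
	}
	// Version 1 embeds a 60-bit Gregorian timestamp and a 14-bit clock sequence.
	fmt.Println(u)                 // canonical string form
	fmt.Println(u.Version())       // VERSION_1
	fmt.Println(u.Time())          // 100ns intervals since 15 Oct 1582
	fmt.Println(u.ClockSequence()) // value in 0..16383
}
```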

vendor/github.com/google/uuid/version4.go generated vendored Normal file

@ -0,0 +1,76 @@
// Copyright 2016 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package uuid
import "io"
// New creates a new random UUID or panics. New is equivalent to
// the expression
//
// uuid.Must(uuid.NewRandom())
func New() UUID {
return Must(NewRandom())
}
// NewString creates a new random UUID and returns it as a string or panics.
// NewString is equivalent to the expression
//
// uuid.New().String()
func NewString() string {
return Must(NewRandom()).String()
}
// NewRandom returns a Random (Version 4) UUID.
//
// The strength of the UUIDs is based on the strength of the crypto/rand
// package.
//
// Uses the randomness pool if it was enabled with EnableRandPool.
//
// A note about uniqueness derived from the UUID Wikipedia entry:
//
// Randomly generated UUIDs have 122 random bits. One's annual risk of being
// hit by a meteorite is estimated to be one chance in 17 billion, that
// means the probability is about 0.00000000006 (6 × 10⁻¹¹),
// equivalent to the odds of creating a few tens of trillions of UUIDs in a
// year and having one duplicate.
func NewRandom() (UUID, error) {
if !poolEnabled {
return NewRandomFromReader(rander)
}
return newRandomFromPool()
}
// NewRandomFromReader returns a UUID based on bytes read from a given io.Reader.
func NewRandomFromReader(r io.Reader) (UUID, error) {
var uuid UUID
_, err := io.ReadFull(r, uuid[:])
if err != nil {
return Nil, err
}
uuid[6] = (uuid[6] & 0x0f) | 0x40 // Version 4
uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10
return uuid, nil
}
func newRandomFromPool() (UUID, error) {
var uuid UUID
poolMu.Lock()
if poolPos == randPoolSize {
_, err := io.ReadFull(rander, pool[:])
if err != nil {
poolMu.Unlock()
return Nil, err
}
poolPos = 0
}
copy(uuid[:], pool[poolPos:(poolPos+16)])
poolPos += 16
poolMu.Unlock()
uuid[6] = (uuid[6] & 0x0f) | 0x40 // Version 4
uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10
return uuid, nil
}
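A small sketch of the Version 4 entry points above; EnableRandPool is optional and, per its doc comment, must not be toggled while other goroutines may be generating UUIDs:

```go
package main

import (
	"fmt"

	"github.com/google/uuid"
)

func main() {
	uuid.EnableRandPool() // optional: batch reads from crypto/rand through the 256-byte pool

	fmt.Println(uuid.New())       // UUID value; panics only if crypto/rand fails
	fmt.Println(uuid.NewString()) // string form directly

	u, err := uuid.NewRandom() // same as New, but surfaces the error
	fmt.Println(u, err)
}
```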

vendor/github.com/google/uuid/version6.go generated vendored Normal file

@ -0,0 +1,56 @@
// Copyright 2023 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package uuid
import "encoding/binary"
// UUID version 6 is a field-compatible version of UUIDv1, reordered for improved DB locality.
// It is expected that UUIDv6 will primarily be used in contexts where there are existing v1 UUIDs.
// Systems that do not involve legacy UUIDv1 SHOULD consider using UUIDv7 instead.
//
// see https://datatracker.ietf.org/doc/html/draft-peabody-dispatch-new-uuid-format-03#uuidv6
//
// NewV6 returns a Version 6 UUID based on the current NodeID and clock
// sequence, and the current time. If the NodeID has not been set by SetNodeID
// or SetNodeInterface then it will be set automatically. If the NodeID cannot
// be set, NewV6 sets the NodeID to random bits automatically. If the clock sequence has not been set by
// SetClockSequence then it will be set automatically. If GetTime fails to
// return the current time, NewV6 returns Nil and an error.
func NewV6() (UUID, error) {
var uuid UUID
now, seq, err := GetTime()
if err != nil {
return uuid, err
}
/*
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| time_high |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| time_mid | time_low_and_version |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|clk_seq_hi_res | clk_seq_low | node (0-1) |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| node (2-5) |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
*/
binary.BigEndian.PutUint64(uuid[0:], uint64(now))
binary.BigEndian.PutUint16(uuid[8:], seq)
uuid[6] = 0x60 | (uuid[6] & 0x0F)
uuid[8] = 0x80 | (uuid[8] & 0x3F)
nodeMu.Lock()
if nodeID == zeroID {
setNodeInterface("")
}
copy(uuid[10:], nodeID[:])
nodeMu.Unlock()
return uuid, nil
}
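A brief sketch: NewV6 keeps the Version 1 Gregorian timestamp but stores it high bits first, so values sort roughly by creation time while Time() still recovers the timestamp:

```go
package main

import (
	"fmt"

	"github.com/google/uuid"
)

func main() {
	u, err := uuid.NewV6()
	if err != nil {
		panic(err)
	}
	fmt.Println(u, u.Version(), u.Time()) // <uuid> VERSION_6 <100ns ticks since 15 Oct 1582>
}
```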

vendor/github.com/google/uuid/version7.go generated vendored Normal file

@ -0,0 +1,104 @@
// Copyright 2023 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package uuid
import (
"io"
)
// UUID version 7 features a time-ordered value field derived from the widely
// implemented and well-known Unix Epoch timestamp source,
// the number of milliseconds since midnight 1 Jan 1970 UTC, leap seconds excluded.
// It also has improved entropy characteristics over versions 1 and 6.
//
// see https://datatracker.ietf.org/doc/html/draft-peabody-dispatch-new-uuid-format-03#name-uuid-version-7
//
// Implementations SHOULD utilize UUID version 7 over UUID version 1 and 6 if possible.
//
// NewV7 returns a Version 7 UUID based on the current time (Unix Epoch).
// It uses the randomness pool if it was enabled with EnableRandPool.
// On error, NewV7 returns Nil and an error.
func NewV7() (UUID, error) {
uuid, err := NewRandom()
if err != nil {
return uuid, err
}
makeV7(uuid[:])
return uuid, nil
}
// NewV7FromReader returns a Version 7 UUID based on the current time (Unix Epoch).
// It uses NewRandomFromReader to fill the random bits.
// On error, NewV7FromReader returns Nil and an error.
func NewV7FromReader(r io.Reader) (UUID, error) {
uuid, err := NewRandomFromReader(r)
if err != nil {
return uuid, err
}
makeV7(uuid[:])
return uuid, nil
}
// makeV7 fills in the 48-bit time (uuid[0] - uuid[5]) and sets the version to b0111 (uuid[6]);
// uuid[8] already has the right variant bits (Variant is 10).
// See NewV7 and NewV7FromReader.
func makeV7(uuid []byte) {
/*
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| unix_ts_ms |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| unix_ts_ms | ver | rand_a (12 bit seq) |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|var| rand_b |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| rand_b |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
*/
_ = uuid[15] // bounds check
t, s := getV7Time()
uuid[0] = byte(t >> 40)
uuid[1] = byte(t >> 32)
uuid[2] = byte(t >> 24)
uuid[3] = byte(t >> 16)
uuid[4] = byte(t >> 8)
uuid[5] = byte(t)
uuid[6] = 0x70 | (0x0F & byte(s>>8))
uuid[7] = byte(s)
}
// lastV7time is the last time we returned stored as:
//
// 52 bits of time in milliseconds since epoch
// 12 bits of (fractional nanoseconds) >> 8
var lastV7time int64
const nanoPerMilli = 1000000
// getV7Time returns the time in milliseconds and nanoseconds / 256.
// The returned (milli << 12 + seq) is guaranteed to be greater than
// (milli << 12 + seq) returned by any previous call to getV7Time.
func getV7Time() (milli, seq int64) {
timeMu.Lock()
defer timeMu.Unlock()
nano := timeNow().UnixNano()
milli = nano / nanoPerMilli
// Sequence number is between 0 and 3906 (nanoPerMilli>>8)
seq = (nano - milli*nanoPerMilli) >> 8
now := milli<<12 + seq
if now <= lastV7time {
now = lastV7time + 1
milli = now >> 12
seq = now & 0xfff
}
lastV7time = now
return milli, seq
}
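The monotonic guard in getV7Time means that, within one process, successive V7 UUIDs carry strictly increasing timestamp-plus-sequence bits, so their string forms sort in creation order. A small sketch of that property (the expected output is an assumption based on the code above):

```go
package main

import (
	"fmt"

	"github.com/google/uuid"
)

func main() {
	prev := uuid.Must(uuid.NewV7())
	for i := 0; i < 5; i++ {
		next := uuid.Must(uuid.NewV7())
		fmt.Println(next, next.String() > prev.String()) // expected: true each time
		prev = next
	}
}
```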

vendor/github.com/owenrumney/go-sarif/v3/LICENSE generated vendored Normal file

@ -0,0 +1,24 @@
This is free and unencumbered software released into the public domain.
Anyone is free to copy, modify, publish, use, compile, sell, or
distribute this software, either in source code form or as a compiled
binary, for any purpose, commercial or non-commercial, and by any
means.
In jurisdictions that recognize copyright laws, the author or authors
of this software dedicate any and all copyright interest in the
software to the public domain. We make this dedication for the benefit
of the public at large and to the detriment of our heirs and
successors. We intend this dedication to be an overt act of
relinquishment in perpetuity of all present and future rights to this
software under copyright law.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
For more information, please refer to <https://unlicense.org>


@ -0,0 +1,16 @@
package report
import (
v210 "github.com/owenrumney/go-sarif/v3/pkg/report/v210/sarif"
v22 "github.com/owenrumney/go-sarif/v3/pkg/report/v22/sarif"
)
type Version string
func NewV210Report() *v210.Report {
return v210.NewReport()
}
func NewV22Report() *v22.Report {
return v22.NewReport()
}
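report.go only chooses which SARIF schema version the rest of the document targets. The sketch below constructs an empty 2.1.0 report and serializes it; the concrete fields of Report are not shown in this diff, so the output shape is an assumption:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/owenrumney/go-sarif/v3/pkg/report"
)

func main() {
	rep := report.NewV210Report() // or report.NewV22Report() for SARIF 2.2
	out, err := json.MarshalIndent(rep, "", "  ")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}
```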


@ -0,0 +1,3 @@
package utils
var DefaultLengthInt int = -1


@ -0,0 +1,103 @@
package sarif
// Address - A physical or virtual address, or a range of addresses, in an 'addressable region' (memory or a binary file).
type Address struct {
// The address expressed as a byte offset from the start of the addressable region.
AbsoluteAddress int `json:"absoluteAddress"`
// A human-readable fully qualified name that is associated with the address.
FullyQualifiedName *string `json:"fullyQualifiedName,omitempty"`
// The index within run.addresses of the cached object for this address.
Index int `json:"index"`
// An open-ended string that identifies the address kind. 'data', 'function', 'header','instruction', 'module', 'page', 'section', 'segment', 'stack', 'stackFrame', 'table' are well-known values.
Kind *string `json:"kind,omitempty"`
// The number of bytes in this range of addresses.
Length *int `json:"length,omitempty"`
// A name that is associated with the address, e.g., '.text'.
Name *string `json:"name,omitempty"`
// The byte offset of this address from the absolute or relative address of the parent object.
OffsetFromParent *int `json:"offsetFromParent,omitempty"`
// The index within run.addresses of the parent object.
ParentIndex int `json:"parentIndex"`
// Key/value pairs that provide additional information about the address.
Properties *PropertyBag `json:"properties,omitempty"`
// The address expressed as a byte offset from the absolute address of the top-most parent object.
RelativeAddress *int `json:"relativeAddress,omitempty"`
}
// NewAddress - creates a new Address
func NewAddress() *Address {
return &Address{
AbsoluteAddress: -1,
Index: -1,
ParentIndex: -1,
}
}
// WithAbsoluteAddress - add a AbsoluteAddress to the Address
func (a *Address) WithAbsoluteAddress(absoluteAddress int) *Address {
a.AbsoluteAddress = absoluteAddress
return a
}
// WithFullyQualifiedName - add a FullyQualifiedName to the Address
func (f *Address) WithFullyQualifiedName(fullyQualifiedName string) *Address {
f.FullyQualifiedName = &fullyQualifiedName
return f
}
// WithIndex - add a Index to the Address
func (i *Address) WithIndex(index int) *Address {
i.Index = index
return i
}
// WithKind - add a Kind to the Address
func (k *Address) WithKind(kind string) *Address {
k.Kind = &kind
return k
}
// WithLength - add a Length to the Address
func (l *Address) WithLength(length int) *Address {
l.Length = &length
return l
}
// WithName - add a Name to the Address
func (n *Address) WithName(name string) *Address {
n.Name = &name
return n
}
// WithOffsetFromParent - add a OffsetFromParent to the Address
func (o *Address) WithOffsetFromParent(offsetFromParent int) *Address {
o.OffsetFromParent = &offsetFromParent
return o
}
// WithParentIndex - add a ParentIndex to the Address
func (p *Address) WithParentIndex(parentIndex int) *Address {
p.ParentIndex = parentIndex
return p
}
// WithProperties - add a Properties to the Address
func (p *Address) WithProperties(properties *PropertyBag) *Address {
p.Properties = properties
return p
}
// WithRelativeAddress - add a RelativeAddress to the Address
func (r *Address) WithRelativeAddress(relativeAddress int) *Address {
r.RelativeAddress = &relativeAddress
return r
}


@ -0,0 +1,142 @@
package sarif
// Artifact - A single artifact. In some cases, this artifact might be nested within another artifact.
type Artifact struct {
// A dictionary, each of whose keys is the name of a hash function and each of whose values is the hashed value of the artifact produced by the specified hash function.
Hashes map[string]string `json:"hashes,omitempty"`
// The contents of the artifact.
Contents *ArtifactContent `json:"contents,omitempty"`
// A short description of the artifact.
Description *Message `json:"description,omitempty"`
// Specifies the encoding for an artifact object that refers to a text file.
Encoding *string `json:"encoding,omitempty"`
// The Coordinated Universal Time (UTC) date and time at which the artifact was most recently modified. See "Date/time properties" in the SARIF spec for the required format.
LastModifiedTimeUtc *string `json:"lastModifiedTimeUtc,omitempty"`
// The length of the artifact in bytes.
Length int `json:"length"`
// The location of the artifact.
Location *ArtifactLocation `json:"location,omitempty"`
// The MIME type (RFC 2045) of the artifact.
MimeType *string `json:"mimeType,omitempty"`
// The offset in bytes of the artifact within its containing artifact.
Offset *int `json:"offset,omitempty"`
// Identifies the index of the immediate parent of the artifact, if this artifact is nested.
ParentIndex int `json:"parentIndex"`
// Key/value pairs that provide additional information about the artifact.
Properties *PropertyBag `json:"properties,omitempty"`
// The role or roles played by the artifact in the analysis.
Roles []string `json:"roles"`
// Specifies the source language for any artifact object that refers to a text file that contains source code.
SourceLanguage *string `json:"sourceLanguage,omitempty"`
}
// NewArtifact - creates a new Artifact
func NewArtifact() *Artifact {
return &Artifact{
Length: -1,
ParentIndex: -1,
Roles: make([]string, 0),
}
}
// AddHash - add a single Hash to the Artifact
func (h *Artifact) AddHash(key, hash string) *Artifact {
if h.Hashes == nil {
h.Hashes = make(map[string]string) // lazily initialize so AddHash does not write to a nil map
}
h.Hashes[key] = hash
return h
}
// WithHashes - add a Hashes to the Artifact
func (h *Artifact) WithHashes(hashes map[string]string) *Artifact {
h.Hashes = hashes
return h
}
// WithContents - add a Contents to the Artifact
func (c *Artifact) WithContents(contents *ArtifactContent) *Artifact {
c.Contents = contents
return c
}
// WithDescription - add a Description to the Artifact
func (d *Artifact) WithDescription(description *Message) *Artifact {
d.Description = description
return d
}
// WithEncoding - add a Encoding to the Artifact
func (e *Artifact) WithEncoding(encoding string) *Artifact {
e.Encoding = &encoding
return e
}
// WithLastModifiedTimeUtc - add a LastModifiedTimeUtc to the Artifact
func (l *Artifact) WithLastModifiedTimeUtc(lastModifiedTimeUtc string) *Artifact {
l.LastModifiedTimeUtc = &lastModifiedTimeUtc
return l
}
// WithLength - add a Length to the Artifact
func (l *Artifact) WithLength(length int) *Artifact {
l.Length = length
return l
}
// WithLocation - add a Location to the Artifact
func (l *Artifact) WithLocation(location *ArtifactLocation) *Artifact {
l.Location = location
return l
}
// WithMimeType - add a MimeType to the Artifact
func (m *Artifact) WithMimeType(mimeType string) *Artifact {
m.MimeType = &mimeType
return m
}
// WithOffset - add a Offset to the Artifact
func (o *Artifact) WithOffset(offset int) *Artifact {
o.Offset = &offset
return o
}
// WithParentIndex - add a ParentIndex to the Artifact
func (p *Artifact) WithParentIndex(parentIndex int) *Artifact {
p.ParentIndex = parentIndex
return p
}
// WithProperties - add a Properties to the Artifact
func (p *Artifact) WithProperties(properties *PropertyBag) *Artifact {
p.Properties = properties
return p
}
// WithRoles - add a Roles to the Artifact
func (r *Artifact) WithRoles(roles []string) *Artifact {
r.Roles = roles
return r
}
// AddRole - add a single Role to the Artifact
func (r *Artifact) AddRole(role string) *Artifact {
r.Roles = append(r.Roles, role)
return r
}
// WithSourceLanguage - add a SourceLanguage to the Artifact
func (s *Artifact) WithSourceLanguage(sourceLanguage string) *Artifact {
s.SourceLanguage = &sourceLanguage
return s
}


@ -0,0 +1,44 @@
package sarif
// ArtifactChange - A change to a single artifact.
type ArtifactChange struct {
// The location of the artifact to change.
ArtifactLocation *ArtifactLocation `json:"artifactLocation,omitempty"`
// Key/value pairs that provide additional information about the change.
Properties *PropertyBag `json:"properties,omitempty"`
// An array of replacement objects, each of which represents the replacement of a single region in a single artifact specified by 'artifactLocation'.
Replacements []*Replacement `json:"replacements,omitempty"`
}
// NewArtifactChange - creates a new ArtifactChange
func NewArtifactChange() *ArtifactChange {
return &ArtifactChange{
Replacements: make([]*Replacement, 0),
}
}
// WithArtifactLocation - add a ArtifactLocation to the ArtifactChange
func (a *ArtifactChange) WithArtifactLocation(artifactLocation *ArtifactLocation) *ArtifactChange {
a.ArtifactLocation = artifactLocation
return a
}
// WithProperties - add a Properties to the ArtifactChange
func (p *ArtifactChange) WithProperties(properties *PropertyBag) *ArtifactChange {
p.Properties = properties
return p
}
// WithReplacements - add a Replacements to the ArtifactChange
func (r *ArtifactChange) WithReplacements(replacements []*Replacement) *ArtifactChange {
r.Replacements = replacements
return r
}
// AddReplacement - add a single Replacement to the ArtifactChange
func (r *ArtifactChange) AddReplacement(replacement *Replacement) *ArtifactChange {
r.Replacements = append(r.Replacements, replacement)
return r
}


@ -0,0 +1,45 @@
package sarif
// ArtifactContent - Represents the contents of an artifact.
type ArtifactContent struct {
// MIME Base64-encoded content from a binary artifact, or from a text artifact in its original encoding.
Binary *string `json:"binary,omitempty"`
// Key/value pairs that provide additional information about the artifact content.
Properties *PropertyBag `json:"properties,omitempty"`
// An alternate rendered representation of the artifact (e.g., a decompiled representation of a binary region).
Rendered *MultiformatMessageString `json:"rendered,omitempty"`
// UTF-8-encoded content from a text artifact.
Text *string `json:"text,omitempty"`
}
// NewArtifactContent - creates a new ArtifactContent
func NewArtifactContent() *ArtifactContent {
return &ArtifactContent{}
}
// WithBinary - add a Binary to the ArtifactContent
func (b *ArtifactContent) WithBinary(binary string) *ArtifactContent {
b.Binary = &binary
return b
}
// WithProperties - add a Properties to the ArtifactContent
func (p *ArtifactContent) WithProperties(properties *PropertyBag) *ArtifactContent {
p.Properties = properties
return p
}
// WithRendered - add a Rendered to the ArtifactContent
func (r *ArtifactContent) WithRendered(rendered *MultiformatMessageString) *ArtifactContent {
r.Rendered = rendered
return r
}
// WithText - add a Text to the ArtifactContent
func (t *ArtifactContent) WithText(text string) *ArtifactContent {
t.Text = &text
return t
}


@ -0,0 +1,56 @@
package sarif
// ArtifactLocation - Specifies the location of an artifact.
type ArtifactLocation struct {
// A short description of the artifact location.
Description *Message `json:"description,omitempty"`
// The index within the run artifacts array of the artifact object associated with the artifact location.
Index int `json:"index"`
// Key/value pairs that provide additional information about the artifact location.
Properties *PropertyBag `json:"properties,omitempty"`
// A string containing a valid relative or absolute URI.
URI *string `json:"uri,omitempty"`
// A string which indirectly specifies the absolute URI with respect to which a relative URI in the "uri" property is interpreted.
URIBaseID *string `json:"uriBaseId,omitempty"`
}
// NewArtifactLocation - creates a new ArtifactLocation
func NewArtifactLocation() *ArtifactLocation {
return &ArtifactLocation{
Index: -1,
}
}
// WithDescription - add a Description to the ArtifactLocation
func (d *ArtifactLocation) WithDescription(description *Message) *ArtifactLocation {
d.Description = description
return d
}
// WithIndex - add a Index to the ArtifactLocation
func (i *ArtifactLocation) WithIndex(index int) *ArtifactLocation {
i.Index = index
return i
}
// WithProperties - add a Properties to the ArtifactLocation
func (p *ArtifactLocation) WithProperties(properties *PropertyBag) *ArtifactLocation {
p.Properties = properties
return p
}
// WithURI - add a URI to the ArtifactLocation
func (u *ArtifactLocation) WithURI(uri string) *ArtifactLocation {
u.URI = &uri
return u
}
// WithURIBaseID - add a URIBaseID to the ArtifactLocation
func (u *ArtifactLocation) WithURIBaseID(uriBaseId string) *ArtifactLocation {
u.URIBaseID = &uriBaseId
return u
}


@ -0,0 +1,6 @@
package sarif
// NewSimpleArtifactLocation creates a new SimpleArtifactLocation and returns a pointer to it
func NewSimpleArtifactLocation(uri string) *ArtifactLocation {
return NewArtifactLocation().WithURI(uri)
}
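All of these generated types follow the same fluent builder pattern: a NewX constructor that pre-fills defaults (such as -1 index sentinels and empty slices), plus chainable WithX/AddX setters. Below is a hedged usage sketch combining the ArtifactLocation and Artifact builders shown above; the v210 import path is taken from report.go earlier in this diff, the exact directory holding these files is not shown here, and the file name is illustrative:

```go
package main

import (
	"fmt"

	sarif "github.com/owenrumney/go-sarif/v3/pkg/report/v210/sarif"
)

func main() {
	// Point at the manifest the finding relates to (path is a made-up example).
	loc := sarif.NewSimpleArtifactLocation("manifests/deployment.yaml").
		WithURIBaseID("SRCROOT").
		WithIndex(0)

	// Describe the artifact itself with the same chained style.
	art := sarif.NewArtifact().
		WithLocation(loc).
		WithMimeType("application/yaml").
		AddRole("analysisTarget")

	fmt.Println(*loc.URI, *art.MimeType, art.Roles)
}
```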


@ -0,0 +1,69 @@
package sarif
// Attachment - An artifact relevant to a result.
type Attachment struct {
// The location of the attachment.
ArtifactLocation *ArtifactLocation `json:"artifactLocation,omitempty"`
// A message describing the role played by the attachment.
Description *Message `json:"description,omitempty"`
// Key/value pairs that provide additional information about the attachment.
Properties *PropertyBag `json:"properties,omitempty"`
// An array of rectangles specifying areas of interest within the image.
Rectangles []*Rectangle `json:"rectangles"`
// An array of regions of interest within the attachment.
Regions []*Region `json:"regions"`
}
// NewAttachment - creates a new Attachment
func NewAttachment() *Attachment {
return &Attachment{
Rectangles: make([]*Rectangle, 0),
Regions: make([]*Region, 0),
}
}
// WithArtifactLocation - add a ArtifactLocation to the Attachment
func (a *Attachment) WithArtifactLocation(artifactLocation *ArtifactLocation) *Attachment {
a.ArtifactLocation = artifactLocation
return a
}
// WithDescription - add a Description to the Attachment
func (d *Attachment) WithDescription(description *Message) *Attachment {
d.Description = description
return d
}
// WithProperties - add a Properties to the Attachment
func (p *Attachment) WithProperties(properties *PropertyBag) *Attachment {
p.Properties = properties
return p
}
// WithRectangles - add a Rectangles to the Attachment
func (r *Attachment) WithRectangles(rectangles []*Rectangle) *Attachment {
r.Rectangles = rectangles
return r
}
// AddRectangle - add a single Rectangle to the Attachment
func (r *Attachment) AddRectangle(rectangle *Rectangle) *Attachment {
r.Rectangles = append(r.Rectangles, rectangle)
return r
}
// WithRegions - add a Regions to the Attachment
func (r *Attachment) WithRegions(regions []*Region) *Attachment {
r.Regions = regions
return r
}
// AddRegion - add a single Region to the Attachment
func (r *Attachment) AddRegion(region *Region) *Attachment {
r.Regions = append(r.Regions, region)
return r
}


@ -0,0 +1,44 @@
package sarif
// CodeFlow - A set of threadFlows which together describe a pattern of code execution relevant to detecting a result.
type CodeFlow struct {
// A message relevant to the code flow.
Message *Message `json:"message,omitempty"`
// Key/value pairs that provide additional information about the code flow.
Properties *PropertyBag `json:"properties,omitempty"`
// An array of one or more unique threadFlow objects, each of which describes the progress of a program through a thread of execution.
ThreadFlows []*ThreadFlow `json:"threadFlows,omitempty"`
}
// NewCodeFlow - creates a new CodeFlow
func NewCodeFlow() *CodeFlow {
return &CodeFlow{
ThreadFlows: make([]*ThreadFlow, 0),
}
}
// WithMessage - add a Message to the CodeFlow
func (m *CodeFlow) WithMessage(message *Message) *CodeFlow {
m.Message = message
return m
}
// WithProperties - add a Properties to the CodeFlow
func (p *CodeFlow) WithProperties(properties *PropertyBag) *CodeFlow {
p.Properties = properties
return p
}
// WithThreadFlows - add a ThreadFlows to the CodeFlow
func (t *CodeFlow) WithThreadFlows(threadFlows []*ThreadFlow) *CodeFlow {
t.ThreadFlows = threadFlows
return t
}
// AddThreadFlow - add a single ThreadFlow to the CodeFlow
func (t *CodeFlow) AddThreadFlow(threadFlow *ThreadFlow) *CodeFlow {
t.ThreadFlows = append(t.ThreadFlows, threadFlow)
return t
}


@ -0,0 +1,36 @@
package sarif
// ConfigurationOverride - Information about how a specific rule or notification was reconfigured at runtime.
type ConfigurationOverride struct {
// Specifies how the rule or notification was configured during the scan.
Configuration *ReportingConfiguration `json:"configuration,omitempty"`
// A reference used to locate the descriptor whose configuration was overridden.
Descriptor *ReportingDescriptorReference `json:"descriptor,omitempty"`
// Key/value pairs that provide additional information about the configuration override.
Properties *PropertyBag `json:"properties,omitempty"`
}
// NewConfigurationOverride - creates a new ConfigurationOverride
func NewConfigurationOverride() *ConfigurationOverride {
return &ConfigurationOverride{}
}
// WithConfiguration - add a Configuration to the ConfigurationOverride
func (c *ConfigurationOverride) WithConfiguration(configuration *ReportingConfiguration) *ConfigurationOverride {
c.Configuration = configuration
return c
}
// WithDescriptor - add a Descriptor to the ConfigurationOverride
func (d *ConfigurationOverride) WithDescriptor(descriptor *ReportingDescriptorReference) *ConfigurationOverride {
d.Descriptor = descriptor
return d
}
// WithProperties - add a Properties to the ConfigurationOverride
func (p *ConfigurationOverride) WithProperties(properties *PropertyBag) *ConfigurationOverride {
p.Properties = properties
return p
}


@ -0,0 +1,53 @@
package sarif
// Conversion - Describes how a converter transformed the output of a static analysis tool from the analysis tool's native output format into the SARIF format.
type Conversion struct {
// The locations of the analysis tool's per-run log files.
AnalysisToolLogFiles []*ArtifactLocation `json:"analysisToolLogFiles"`
// An invocation object that describes the invocation of the converter.
Invocation *Invocation `json:"invocation,omitempty"`
// Key/value pairs that provide additional information about the conversion.
Properties *PropertyBag `json:"properties,omitempty"`
// A tool object that describes the converter.
Tool *Tool `json:"tool,omitempty"`
}
// NewConversion - creates a new Conversion
func NewConversion() *Conversion {
return &Conversion{
AnalysisToolLogFiles: make([]*ArtifactLocation, 0),
}
}
// WithAnalysisToolLogFiles - add a AnalysisToolLogFiles to the Conversion
func (a *Conversion) WithAnalysisToolLogFiles(analysisToolLogFiles []*ArtifactLocation) *Conversion {
a.AnalysisToolLogFiles = analysisToolLogFiles
return a
}
// AddAnalysisToolLogFile - add a single AnalysisToolLogFile to the Conversion
func (a *Conversion) AddAnalysisToolLogFile(analysisToolLogFile *ArtifactLocation) *Conversion {
a.AnalysisToolLogFiles = append(a.AnalysisToolLogFiles, analysisToolLogFile)
return a
}
// WithInvocation - add a Invocation to the Conversion
func (i *Conversion) WithInvocation(invocation *Invocation) *Conversion {
i.Invocation = invocation
return i
}
// WithProperties - add a Properties to the Conversion
func (p *Conversion) WithProperties(properties *PropertyBag) *Conversion {
p.Properties = properties
return p
}
// WithTool - add a Tool to the Conversion
func (t *Conversion) WithTool(tool *Tool) *Conversion {
t.Tool = tool
return t
}


@ -0,0 +1,54 @@
package sarif
// Edge - Represents a directed edge in a graph.
type Edge struct {
// A string that uniquely identifies the edge within its graph.
ID *string `json:"id,omitempty"`
// A short description of the edge.
Label *Message `json:"label,omitempty"`
// Key/value pairs that provide additional information about the edge.
Properties *PropertyBag `json:"properties,omitempty"`
// Identifies the source node (the node at which the edge starts).
SourceNodeID *string `json:"sourceNodeId,omitempty"`
// Identifies the target node (the node at which the edge ends).
TargetNodeID *string `json:"targetNodeId,omitempty"`
}
// NewEdge - creates a new Edge
func NewEdge() *Edge {
return &Edge{}
}
// WithID - add a ID to the Edge
func (i *Edge) WithID(id string) *Edge {
i.ID = &id
return i
}
// WithLabel - add a Label to the Edge
func (l *Edge) WithLabel(label *Message) *Edge {
l.Label = label
return l
}
// WithProperties - add a Properties to the Edge
func (p *Edge) WithProperties(properties *PropertyBag) *Edge {
p.Properties = properties
return p
}
// WithSourceNodeID - add a SourceNodeID to the Edge
func (s *Edge) WithSourceNodeID(sourceNodeId string) *Edge {
s.SourceNodeID = &sourceNodeId
return s
}
// WithTargetNodeID - add a TargetNodeID to the Edge
func (t *Edge) WithTargetNodeID(targetNodeId string) *Edge {
t.TargetNodeID = &targetNodeId
return t
}


@ -0,0 +1,60 @@
package sarif
// EdgeTraversal - Represents the traversal of a single edge during a graph traversal.
type EdgeTraversal struct {
// The values of relevant expressions after the edge has been traversed.
FinalState map[string]MultiformatMessageString `json:"finalState,omitempty"`
// Identifies the edge being traversed.
EdgeID *string `json:"edgeId,omitempty"`
// A message to display to the user as the edge is traversed.
Message *Message `json:"message,omitempty"`
// Key/value pairs that provide additional information about the edge traversal.
Properties *PropertyBag `json:"properties,omitempty"`
// The number of edge traversals necessary to return from a nested graph.
StepOverEdgeCount *int `json:"stepOverEdgeCount,omitempty"`
}
// NewEdgeTraversal - creates a new EdgeTraversal
func NewEdgeTraversal() *EdgeTraversal {
return &EdgeTraversal{}
}
// AddFinalState - add a single FinalState to the EdgeTraversal
func (f *EdgeTraversal) AddFinalState(key string, finalState MultiformatMessageString) *EdgeTraversal {
if f.FinalState == nil {
f.FinalState = make(map[string]MultiformatMessageString) // lazily initialize so AddFinalState does not write to a nil map
}
f.FinalState[key] = finalState
return f
}
// WithFinalState - add a FinalState to the EdgeTraversal
func (f *EdgeTraversal) WithFinalState(finalState map[string]MultiformatMessageString) *EdgeTraversal {
f.FinalState = finalState
return f
}
// WithEdgeID - add a EdgeID to the EdgeTraversal
func (e *EdgeTraversal) WithEdgeID(edgeId string) *EdgeTraversal {
e.EdgeID = &edgeId
return e
}
// WithMessage - add a Message to the EdgeTraversal
func (m *EdgeTraversal) WithMessage(message *Message) *EdgeTraversal {
m.Message = message
return m
}
// WithProperties - add a Properties to the EdgeTraversal
func (p *EdgeTraversal) WithProperties(properties *PropertyBag) *EdgeTraversal {
p.Properties = properties
return p
}
// WithStepOverEdgeCount - add a StepOverEdgeCount to the EdgeTraversal
func (s *EdgeTraversal) WithStepOverEdgeCount(stepOverEdgeCount int) *EdgeTraversal {
s.StepOverEdgeCount = &stepOverEdgeCount
return s
}


@ -0,0 +1,62 @@
package sarif
// Exception - Describes a runtime exception encountered during the execution of an analysis tool.
type Exception struct {
// An array of exception objects each of which is considered a cause of this exception.
InnerExceptions []*Exception `json:"innerExceptions"`
// A string that identifies the kind of exception, for example, the fully qualified type name of an object that was thrown, or the symbolic name of a signal.
Kind *string `json:"kind,omitempty"`
// A message that describes the exception.
Message *string `json:"message,omitempty"`
// Key/value pairs that provide additional information about the exception.
Properties *PropertyBag `json:"properties,omitempty"`
// The sequence of function calls leading to the exception.
Stack *Stack `json:"stack,omitempty"`
}
// NewException - creates a new Exception
func NewException() *Exception {
return &Exception{
InnerExceptions: make([]*Exception, 0),
}
}
// WithInnerExceptions - add a InnerExceptions to the Exception
func (i *Exception) WithInnerExceptions(innerExceptions []*Exception) *Exception {
i.InnerExceptions = innerExceptions
return i
}
// AddInnerException - add a single InnerException to the Exception
func (i *Exception) AddInnerException(innerException *Exception) *Exception {
i.InnerExceptions = append(i.InnerExceptions, innerException)
return i
}
// WithKind - add a Kind to the Exception
func (k *Exception) WithKind(kind string) *Exception {
k.Kind = &kind
return k
}
// WithMessage - add a Message to the Exception
func (m *Exception) WithMessage(message string) *Exception {
m.Message = &message
return m
}
// WithProperties - add a Properties to the Exception
func (p *Exception) WithProperties(properties *PropertyBag) *Exception {
p.Properties = properties
return p
}
// WithStack - add a Stack to the Exception
func (s *Exception) WithStack(stack *Stack) *Exception {
s.Stack = stack
return s
}


@ -0,0 +1,290 @@
package sarif
// ExternalProperties - The top-level element of an external property file.
type ExternalProperties struct {
// Addresses that will be merged with a separate run.
Addresses []*Address `json:"addresses"`
// An array of artifact objects that will be merged with a separate run.
Artifacts []*Artifact `json:"artifacts,omitempty"`
// A conversion object that will be merged with a separate run.
Conversion *Conversion `json:"conversion,omitempty"`
// The analysis tool object that will be merged with a separate run.
Driver *ToolComponent `json:"driver,omitempty"`
// Tool extensions that will be merged with a separate run.
Extensions []*ToolComponent `json:"extensions"`
// Key/value pairs that provide additional information that will be merged with a separate run.
ExternalizedProperties *PropertyBag `json:"externalizedProperties,omitempty"`
// An array of graph objects that will be merged with a separate run.
Graphs []*Graph `json:"graphs"`
// A stable, unique identifier for this external properties object, in the form of a GUID.
GuID *string `json:"guid,omitempty"`
// Describes the invocation of the analysis tool that will be merged with a separate run.
Invocations []*Invocation `json:"invocations"`
// An array of logical locations such as namespaces, types or functions that will be merged with a separate run.
LogicalLocations []*LogicalLocation `json:"logicalLocations"`
// Tool policies that will be merged with a separate run.
Policies []*ToolComponent `json:"policies"`
// Key/value pairs that provide additional information about the external properties.
Properties *PropertyBag `json:"properties,omitempty"`
// An array of result objects that will be merged with a separate run.
Results []*Result `json:"results"`
// A stable, unique identifier for the run associated with this external properties object, in the form of a GUID.
RunGuID *string `json:"runGuid,omitempty"`
// The URI of the JSON schema corresponding to the version of the external property file format.
Schema *string `json:"schema,omitempty"`
// Tool taxonomies that will be merged with a separate run.
Taxonomies []*ToolComponent `json:"taxonomies"`
// An array of threadFlowLocation objects that will be merged with a separate run.
ThreadFlowLocations []*ThreadFlowLocation `json:"threadFlowLocations"`
// Tool translations that will be merged with a separate run.
Translations []*ToolComponent `json:"translations"`
// The SARIF format version of this external properties object.
Version *string `json:"version,omitempty"`
// Requests that will be merged with a separate run.
WebRequests []*WebRequest `json:"webRequests"`
// Responses that will be merged with a separate run.
WebResponses []*WebResponse `json:"webResponses"`
}
// NewExternalProperties - creates a new ExternalProperties
func NewExternalProperties() *ExternalProperties {
return &ExternalProperties{
Addresses: make([]*Address, 0),
Artifacts: make([]*Artifact, 0),
Extensions: make([]*ToolComponent, 0),
Graphs: make([]*Graph, 0),
Invocations: make([]*Invocation, 0),
LogicalLocations: make([]*LogicalLocation, 0),
Policies: make([]*ToolComponent, 0),
Results: make([]*Result, 0),
Taxonomies: make([]*ToolComponent, 0),
ThreadFlowLocations: make([]*ThreadFlowLocation, 0),
Translations: make([]*ToolComponent, 0),
WebRequests: make([]*WebRequest, 0),
WebResponses: make([]*WebResponse, 0),
}
}
// WithAddresses - add a Addresses to the ExternalProperties
func (a *ExternalProperties) WithAddresses(addresses []*Address) *ExternalProperties {
a.Addresses = addresses
return a
}
// AddAddresse - add a single Addresse to the ExternalProperties
func (a *ExternalProperties) AddAddresse(addresse *Address) *ExternalProperties {
a.Addresses = append(a.Addresses, addresse)
return a
}
// WithArtifacts - add a Artifacts to the ExternalProperties
func (a *ExternalProperties) WithArtifacts(artifacts []*Artifact) *ExternalProperties {
a.Artifacts = artifacts
return a
}
// AddArtifact - add a single Artifact to the ExternalProperties
func (a *ExternalProperties) AddArtifact(artifact *Artifact) *ExternalProperties {
a.Artifacts = append(a.Artifacts, artifact)
return a
}
// WithConversion - add a Conversion to the ExternalProperties
func (c *ExternalProperties) WithConversion(conversion *Conversion) *ExternalProperties {
c.Conversion = conversion
return c
}
// WithDriver - add a Driver to the ExternalProperties
func (d *ExternalProperties) WithDriver(driver *ToolComponent) *ExternalProperties {
d.Driver = driver
return d
}
// WithExtensions - add a Extensions to the ExternalProperties
func (e *ExternalProperties) WithExtensions(extensions []*ToolComponent) *ExternalProperties {
e.Extensions = extensions
return e
}
// AddExtension - add a single Extension to the ExternalProperties
func (e *ExternalProperties) AddExtension(extension *ToolComponent) *ExternalProperties {
e.Extensions = append(e.Extensions, extension)
return e
}
// WithExternalizedProperties - add a ExternalizedProperties to the ExternalProperties
func (e *ExternalProperties) WithExternalizedProperties(externalizedProperties *PropertyBag) *ExternalProperties {
e.ExternalizedProperties = externalizedProperties
return e
}
// WithGraphs - add a Graphs to the ExternalProperties
func (g *ExternalProperties) WithGraphs(graphs []*Graph) *ExternalProperties {
g.Graphs = graphs
return g
}
// AddGraph - add a single Graph to the ExternalProperties
func (g *ExternalProperties) AddGraph(graph *Graph) *ExternalProperties {
g.Graphs = append(g.Graphs, graph)
return g
}
// WithGuID - add a GuID to the ExternalProperties
func (g *ExternalProperties) WithGuID(guid string) *ExternalProperties {
g.GuID = &guid
return g
}
// WithInvocations - add a Invocations to the ExternalProperties
func (i *ExternalProperties) WithInvocations(invocations []*Invocation) *ExternalProperties {
i.Invocations = invocations
return i
}
// AddInvocation - add a single Invocation to the ExternalProperties
func (i *ExternalProperties) AddInvocation(invocation *Invocation) *ExternalProperties {
i.Invocations = append(i.Invocations, invocation)
return i
}
// WithLogicalLocations - add a LogicalLocations to the ExternalProperties
func (l *ExternalProperties) WithLogicalLocations(logicalLocations []*LogicalLocation) *ExternalProperties {
l.LogicalLocations = logicalLocations
return l
}
// AddLogicalLocation - add a single LogicalLocation to the ExternalProperties
func (l *ExternalProperties) AddLogicalLocation(logicalLocation *LogicalLocation) *ExternalProperties {
l.LogicalLocations = append(l.LogicalLocations, logicalLocation)
return l
}
// WithPolicies - add a Policies to the ExternalProperties
func (p *ExternalProperties) WithPolicies(policies []*ToolComponent) *ExternalProperties {
p.Policies = policies
return p
}
// AddPolicie - add a single Policie to the ExternalProperties
func (p *ExternalProperties) AddPolicie(policie *ToolComponent) *ExternalProperties {
p.Policies = append(p.Policies, policie)
return p
}
// WithProperties - add a Properties to the ExternalProperties
func (p *ExternalProperties) WithProperties(properties *PropertyBag) *ExternalProperties {
p.Properties = properties
return p
}
// WithResults - add a Results to the ExternalProperties
func (r *ExternalProperties) WithResults(results []*Result) *ExternalProperties {
r.Results = results
return r
}
// AddResult - add a single Result to the ExternalProperties
func (r *ExternalProperties) AddResult(result *Result) *ExternalProperties {
r.Results = append(r.Results, result)
return r
}
// WithRunGuID - add a RunGuID to the ExternalProperties
func (r *ExternalProperties) WithRunGuID(runGuid string) *ExternalProperties {
r.RunGuID = &runGuid
return r
}
// WithSchema - add a Schema to the ExternalProperties
func (s *ExternalProperties) WithSchema(schema string) *ExternalProperties {
s.Schema = &schema
return s
}
// WithTaxonomies - add a Taxonomies to the ExternalProperties
func (t *ExternalProperties) WithTaxonomies(taxonomies []*ToolComponent) *ExternalProperties {
t.Taxonomies = taxonomies
return t
}
// AddTaxonomie - add a single Taxonomie to the ExternalProperties
func (t *ExternalProperties) AddTaxonomie(taxonomie *ToolComponent) *ExternalProperties {
t.Taxonomies = append(t.Taxonomies, taxonomie)
return t
}
// WithThreadFlowLocations - add a ThreadFlowLocations to the ExternalProperties
func (t *ExternalProperties) WithThreadFlowLocations(threadFlowLocations []*ThreadFlowLocation) *ExternalProperties {
t.ThreadFlowLocations = threadFlowLocations
return t
}
// AddThreadFlowLocation - add a single ThreadFlowLocation to the ExternalProperties
func (t *ExternalProperties) AddThreadFlowLocation(threadFlowLocation *ThreadFlowLocation) *ExternalProperties {
t.ThreadFlowLocations = append(t.ThreadFlowLocations, threadFlowLocation)
return t
}
// WithTranslations - add a Translations to the ExternalProperties
func (t *ExternalProperties) WithTranslations(translations []*ToolComponent) *ExternalProperties {
t.Translations = translations
return t
}
// AddTranslation - add a single Translation to the ExternalProperties
func (t *ExternalProperties) AddTranslation(translation *ToolComponent) *ExternalProperties {
t.Translations = append(t.Translations, translation)
return t
}
// WithVersion - add a Version to the ExternalProperties
func (v *ExternalProperties) WithVersion(version string) *ExternalProperties {
v.Version = &version
return v
}
// WithWebRequests - add a WebRequests to the ExternalProperties
func (w *ExternalProperties) WithWebRequests(webRequests []*WebRequest) *ExternalProperties {
w.WebRequests = webRequests
return w
}
// AddWebRequest - add a single WebRequest to the ExternalProperties
func (w *ExternalProperties) AddWebRequest(webRequest *WebRequest) *ExternalProperties {
w.WebRequests = append(w.WebRequests, webRequest)
return w
}
// WithWebResponses - add a WebResponses to the ExternalProperties
func (w *ExternalProperties) WithWebResponses(webResponses []*WebResponse) *ExternalProperties {
w.WebResponses = webResponses
return w
}
// AddWebResponse - add a single WebResponse to the ExternalProperties
func (w *ExternalProperties) AddWebResponse(webResponse *WebResponse) *ExternalProperties {
w.WebResponses = append(w.WebResponses, webResponse)
return w
}


@ -0,0 +1,47 @@
package sarif
// ExternalPropertyFileReference - Contains information that enables a SARIF consumer to locate the external property file that contains the value of an externalized property associated with the run.
type ExternalPropertyFileReference struct {
// A stable, unique identifier for the external property file in the form of a GUID.
GuID *string `json:"guid,omitempty"`
// A non-negative integer specifying the number of items contained in the external property file.
ItemCount int `json:"itemCount"`
// The location of the external property file.
Location *ArtifactLocation `json:"location,omitempty"`
// Key/value pairs that provide additional information about the external property file.
Properties *PropertyBag `json:"properties,omitempty"`
}
// NewExternalPropertyFileReference - creates a new ExternalPropertyFileReference
func NewExternalPropertyFileReference() *ExternalPropertyFileReference {
return &ExternalPropertyFileReference{
ItemCount: -1,
}
}
// WithGuID - add a GuID to the ExternalPropertyFileReference
func (g *ExternalPropertyFileReference) WithGuID(guid string) *ExternalPropertyFileReference {
g.GuID = &guid
return g
}
// WithItemCount - add a ItemCount to the ExternalPropertyFileReference
func (i *ExternalPropertyFileReference) WithItemCount(itemCount int) *ExternalPropertyFileReference {
i.ItemCount = itemCount
return i
}
// WithLocation - add a Location to the ExternalPropertyFileReference
func (l *ExternalPropertyFileReference) WithLocation(location *ArtifactLocation) *ExternalPropertyFileReference {
l.Location = location
return l
}
// WithProperties - add a Properties to the ExternalPropertyFileReference
func (p *ExternalPropertyFileReference) WithProperties(properties *PropertyBag) *ExternalPropertyFileReference {
p.Properties = properties
return p
}


@ -0,0 +1,254 @@
package sarif
// ExternalPropertyFileReferences - References to external property files that should be inlined with the content of a root log file.
type ExternalPropertyFileReferences struct {
// An array of external property files containing run.addresses arrays to be merged with the root log file.
Addresses []*ExternalPropertyFileReference `json:"addresses"`
// An array of external property files containing run.artifacts arrays to be merged with the root log file.
Artifacts []*ExternalPropertyFileReference `json:"artifacts"`
// An external property file containing a run.conversion object to be merged with the root log file.
Conversion *ExternalPropertyFileReference `json:"conversion,omitempty"`
// An external property file containing a run.driver object to be merged with the root log file.
Driver *ExternalPropertyFileReference `json:"driver,omitempty"`
// An array of external property files containing run.extensions arrays to be merged with the root log file.
Extensions []*ExternalPropertyFileReference `json:"extensions"`
// An external property file containing a run.properties object to be merged with the root log file.
ExternalizedProperties *ExternalPropertyFileReference `json:"externalizedProperties,omitempty"`
// An array of external property files containing a run.graphs object to be merged with the root log file.
Graphs []*ExternalPropertyFileReference `json:"graphs"`
// An array of external property files containing run.invocations arrays to be merged with the root log file.
Invocations []*ExternalPropertyFileReference `json:"invocations"`
// An array of external property files containing run.logicalLocations arrays to be merged with the root log file.
LogicalLocations []*ExternalPropertyFileReference `json:"logicalLocations"`
// An array of external property files containing run.policies arrays to be merged with the root log file.
Policies []*ExternalPropertyFileReference `json:"policies"`
// Key/value pairs that provide additional information about the external property files.
Properties *PropertyBag `json:"properties,omitempty"`
// An array of external property files containing run.results arrays to be merged with the root log file.
Results []*ExternalPropertyFileReference `json:"results"`
// An array of external property files containing run.taxonomies arrays to be merged with the root log file.
Taxonomies []*ExternalPropertyFileReference `json:"taxonomies"`
// An array of external property files containing run.threadFlowLocations arrays to be merged with the root log file.
ThreadFlowLocations []*ExternalPropertyFileReference `json:"threadFlowLocations"`
// An array of external property files containing run.translations arrays to be merged with the root log file.
Translations []*ExternalPropertyFileReference `json:"translations"`
// An array of external property files containing run.requests arrays to be merged with the root log file.
WebRequests []*ExternalPropertyFileReference `json:"webRequests"`
// An array of external property files containing run.responses arrays to be merged with the root log file.
WebResponses []*ExternalPropertyFileReference `json:"webResponses"`
}
// NewExternalPropertyFileReferences - creates a new ExternalPropertyFileReferences
func NewExternalPropertyFileReferences() *ExternalPropertyFileReferences {
return &ExternalPropertyFileReferences{
Addresses: make([]*ExternalPropertyFileReference, 0),
Artifacts: make([]*ExternalPropertyFileReference, 0),
Extensions: make([]*ExternalPropertyFileReference, 0),
Graphs: make([]*ExternalPropertyFileReference, 0),
Invocations: make([]*ExternalPropertyFileReference, 0),
LogicalLocations: make([]*ExternalPropertyFileReference, 0),
Policies: make([]*ExternalPropertyFileReference, 0),
Results: make([]*ExternalPropertyFileReference, 0),
Taxonomies: make([]*ExternalPropertyFileReference, 0),
ThreadFlowLocations: make([]*ExternalPropertyFileReference, 0),
Translations: make([]*ExternalPropertyFileReference, 0),
WebRequests: make([]*ExternalPropertyFileReference, 0),
WebResponses: make([]*ExternalPropertyFileReference, 0),
}
}
// WithAddresses - add a Addresses to the ExternalPropertyFileReferences
func (a *ExternalPropertyFileReferences) WithAddresses(addresses []*ExternalPropertyFileReference) *ExternalPropertyFileReferences {
a.Addresses = addresses
return a
}
// AddAddresse - add a single Addresse to the ExternalPropertyFileReferences
func (a *ExternalPropertyFileReferences) AddAddresse(addresse *ExternalPropertyFileReference) *ExternalPropertyFileReferences {
a.Addresses = append(a.Addresses, addresse)
return a
}
// WithArtifacts - add a Artifacts to the ExternalPropertyFileReferences
func (a *ExternalPropertyFileReferences) WithArtifacts(artifacts []*ExternalPropertyFileReference) *ExternalPropertyFileReferences {
a.Artifacts = artifacts
return a
}
// AddArtifact - add a single Artifact to the ExternalPropertyFileReferences
func (a *ExternalPropertyFileReferences) AddArtifact(artifact *ExternalPropertyFileReference) *ExternalPropertyFileReferences {
a.Artifacts = append(a.Artifacts, artifact)
return a
}
// WithConversion - add a Conversion to the ExternalPropertyFileReferences
func (c *ExternalPropertyFileReferences) WithConversion(conversion *ExternalPropertyFileReference) *ExternalPropertyFileReferences {
c.Conversion = conversion
return c
}
// WithDriver - add a Driver to the ExternalPropertyFileReferences
func (d *ExternalPropertyFileReferences) WithDriver(driver *ExternalPropertyFileReference) *ExternalPropertyFileReferences {
d.Driver = driver
return d
}
// WithExtensions - add a Extensions to the ExternalPropertyFileReferences
func (e *ExternalPropertyFileReferences) WithExtensions(extensions []*ExternalPropertyFileReference) *ExternalPropertyFileReferences {
e.Extensions = extensions
return e
}
// AddExtension - add a single Extension to the ExternalPropertyFileReferences
func (e *ExternalPropertyFileReferences) AddExtension(extension *ExternalPropertyFileReference) *ExternalPropertyFileReferences {
e.Extensions = append(e.Extensions, extension)
return e
}
// WithExternalizedProperties - add a ExternalizedProperties to the ExternalPropertyFileReferences
func (e *ExternalPropertyFileReferences) WithExternalizedProperties(externalizedProperties *ExternalPropertyFileReference) *ExternalPropertyFileReferences {
e.ExternalizedProperties = externalizedProperties
return e
}
// WithGraphs - add a Graphs to the ExternalPropertyFileReferences
func (g *ExternalPropertyFileReferences) WithGraphs(graphs []*ExternalPropertyFileReference) *ExternalPropertyFileReferences {
g.Graphs = graphs
return g
}
// AddGraph - add a single Graph to the ExternalPropertyFileReferences
func (g *ExternalPropertyFileReferences) AddGraph(graph *ExternalPropertyFileReference) *ExternalPropertyFileReferences {
g.Graphs = append(g.Graphs, graph)
return g
}
// WithInvocations - add a Invocations to the ExternalPropertyFileReferences
func (i *ExternalPropertyFileReferences) WithInvocations(invocations []*ExternalPropertyFileReference) *ExternalPropertyFileReferences {
i.Invocations = invocations
return i
}
// AddInvocation - add a single Invocation to the ExternalPropertyFileReferences
func (i *ExternalPropertyFileReferences) AddInvocation(invocation *ExternalPropertyFileReference) *ExternalPropertyFileReferences {
i.Invocations = append(i.Invocations, invocation)
return i
}
// WithLogicalLocations - add a LogicalLocations to the ExternalPropertyFileReferences
func (l *ExternalPropertyFileReferences) WithLogicalLocations(logicalLocations []*ExternalPropertyFileReference) *ExternalPropertyFileReferences {
l.LogicalLocations = logicalLocations
return l
}
// AddLogicalLocation - add a single LogicalLocation to the ExternalPropertyFileReferences
func (l *ExternalPropertyFileReferences) AddLogicalLocation(logicalLocation *ExternalPropertyFileReference) *ExternalPropertyFileReferences {
l.LogicalLocations = append(l.LogicalLocations, logicalLocation)
return l
}
// WithPolicies - add a Policies to the ExternalPropertyFileReferences
func (p *ExternalPropertyFileReferences) WithPolicies(policies []*ExternalPropertyFileReference) *ExternalPropertyFileReferences {
p.Policies = policies
return p
}
// AddPolicie - add a single Policie to the ExternalPropertyFileReferences
func (p *ExternalPropertyFileReferences) AddPolicie(policie *ExternalPropertyFileReference) *ExternalPropertyFileReferences {
p.Policies = append(p.Policies, policie)
return p
}
// WithProperties - add a Properties to the ExternalPropertyFileReferences
func (p *ExternalPropertyFileReferences) WithProperties(properties *PropertyBag) *ExternalPropertyFileReferences {
p.Properties = properties
return p
}
// WithResults - add a Results to the ExternalPropertyFileReferences
func (r *ExternalPropertyFileReferences) WithResults(results []*ExternalPropertyFileReference) *ExternalPropertyFileReferences {
r.Results = results
return r
}
// AddResult - add a single Result to the ExternalPropertyFileReferences
func (r *ExternalPropertyFileReferences) AddResult(result *ExternalPropertyFileReference) *ExternalPropertyFileReferences {
r.Results = append(r.Results, result)
return r
}
// WithTaxonomies - add a Taxonomies to the ExternalPropertyFileReferences
func (t *ExternalPropertyFileReferences) WithTaxonomies(taxonomies []*ExternalPropertyFileReference) *ExternalPropertyFileReferences {
t.Taxonomies = taxonomies
return t
}
// AddTaxonomie - add a single Taxonomie to the ExternalPropertyFileReferences
func (t *ExternalPropertyFileReferences) AddTaxonomie(taxonomie *ExternalPropertyFileReference) *ExternalPropertyFileReferences {
t.Taxonomies = append(t.Taxonomies, taxonomie)
return t
}
// WithThreadFlowLocations - add a ThreadFlowLocations to the ExternalPropertyFileReferences
func (t *ExternalPropertyFileReferences) WithThreadFlowLocations(threadFlowLocations []*ExternalPropertyFileReference) *ExternalPropertyFileReferences {
t.ThreadFlowLocations = threadFlowLocations
return t
}
// AddThreadFlowLocation - add a single ThreadFlowLocation to the ExternalPropertyFileReferences
func (t *ExternalPropertyFileReferences) AddThreadFlowLocation(threadFlowLocation *ExternalPropertyFileReference) *ExternalPropertyFileReferences {
t.ThreadFlowLocations = append(t.ThreadFlowLocations, threadFlowLocation)
return t
}
// WithTranslations - add a Translations to the ExternalPropertyFileReferences
func (t *ExternalPropertyFileReferences) WithTranslations(translations []*ExternalPropertyFileReference) *ExternalPropertyFileReferences {
t.Translations = translations
return t
}
// AddTranslation - add a single Translation to the ExternalPropertyFileReferences
func (t *ExternalPropertyFileReferences) AddTranslation(translation *ExternalPropertyFileReference) *ExternalPropertyFileReferences {
t.Translations = append(t.Translations, translation)
return t
}
// WithWebRequests - add a WebRequests to the ExternalPropertyFileReferences
func (w *ExternalPropertyFileReferences) WithWebRequests(webRequests []*ExternalPropertyFileReference) *ExternalPropertyFileReferences {
w.WebRequests = webRequests
return w
}
// AddWebRequest - add a single WebRequest to the ExternalPropertyFileReferences
func (w *ExternalPropertyFileReferences) AddWebRequest(webRequest *ExternalPropertyFileReference) *ExternalPropertyFileReferences {
w.WebRequests = append(w.WebRequests, webRequest)
return w
}
// WithWebResponses - add a WebResponses to the ExternalPropertyFileReferences
func (w *ExternalPropertyFileReferences) WithWebResponses(webResponses []*ExternalPropertyFileReference) *ExternalPropertyFileReferences {
w.WebResponses = webResponses
return w
}
// AddWebResponse - add a single WebResponse to the ExternalPropertyFileReferences
func (w *ExternalPropertyFileReferences) AddWebResponse(webResponse *ExternalPropertyFileReference) *ExternalPropertyFileReferences {
w.WebResponses = append(w.WebResponses, webResponse)
return w
}
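
A minimal usage sketch (illustrative only; the helper name, GUID and item count are invented) showing how the With*/Add* builders above chain when a root log points at an external file of results:

```go
package sarif

// exampleExternalRefs is an illustrative sketch: it references an external
// property file said to hold 12 result objects.
func exampleExternalRefs() *ExternalPropertyFileReferences {
	results := NewExternalPropertyFileReference().
		WithGuID("11111111-2222-3333-4444-555555555555"). // hypothetical GUID
		WithItemCount(12)
	return NewExternalPropertyFileReferences().
		AddResult(results)
}
```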


@ -0,0 +1,44 @@
package sarif
// Fix - A proposed fix for the problem represented by a result object. A fix specifies a set of artifacts to modify. For each artifact, it specifies a set of bytes to remove, and provides a set of new bytes to replace them.
type Fix struct {
// One or more artifact changes that comprise a fix for a result.
ArtifactChanges []*ArtifactChange `json:"artifactChanges,omitempty"`
// A message that describes the proposed fix, enabling viewers to present the proposed change to an end user.
Description *Message `json:"description,omitempty"`
// Key/value pairs that provide additional information about the fix.
Properties *PropertyBag `json:"properties,omitempty"`
}
// NewFix - creates a new Fix
func NewFix() *Fix {
return &Fix{
ArtifactChanges: make([]*ArtifactChange, 0),
}
}
// WithArtifactChanges - add a ArtifactChanges to the Fix
func (a *Fix) WithArtifactChanges(artifactChanges []*ArtifactChange) *Fix {
a.ArtifactChanges = artifactChanges
return a
}
// AddArtifactChange - add a single ArtifactChange to the Fix
func (a *Fix) AddArtifactChange(artifactChange *ArtifactChange) *Fix {
a.ArtifactChanges = append(a.ArtifactChanges, artifactChange)
return a
}
// WithDescription - add a Description to the Fix
func (d *Fix) WithDescription(description *Message) *Fix {
d.Description = description
return d
}
// WithProperties - add a Properties to the Fix
func (p *Fix) WithProperties(properties *PropertyBag) *Fix {
p.Properties = properties
return p
}
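
A minimal usage sketch (illustrative only; the helper name and message text are invented, and the ArtifactChange is taken as a parameter because its builder is defined elsewhere in this change):

```go
package sarif

// exampleFix is an illustrative sketch: it attaches a plain-text description
// to a Fix and appends a caller-supplied ArtifactChange.
func exampleFix(change *ArtifactChange) *Fix {
	return NewFix().
		WithDescription(NewMessage().WithText("replace the invalid apiVersion")).
		AddArtifactChange(change)
}
```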


@ -0,0 +1,60 @@
package sarif
// Graph - A network of nodes and directed edges that describes some aspect of the structure of the code (for example, a call graph).
type Graph struct {
// A description of the graph.
Description *Message `json:"description,omitempty"`
// An array of edge objects representing the edges of the graph.
Edges []*Edge `json:"edges"`
// An array of node objects representing the nodes of the graph.
Nodes []*Node `json:"nodes"`
// Key/value pairs that provide additional information about the graph.
Properties *PropertyBag `json:"properties,omitempty"`
}
// NewGraph - creates a new Graph
func NewGraph() *Graph {
return &Graph{
Edges: make([]*Edge, 0),
Nodes: make([]*Node, 0),
}
}
// WithDescription - add a Description to the Graph
func (d *Graph) WithDescription(description *Message) *Graph {
d.Description = description
return d
}
// WithEdges - add a Edges to the Graph
func (e *Graph) WithEdges(edges []*Edge) *Graph {
e.Edges = edges
return e
}
// AddEdge - add a single Edge to the Graph
func (e *Graph) AddEdge(edge *Edge) *Graph {
e.Edges = append(e.Edges, edge)
return e
}
// WithNodes - add a Nodes to the Graph
func (n *Graph) WithNodes(nodes []*Node) *Graph {
n.Nodes = nodes
return n
}
// AddNode - add a single Node to the Graph
func (n *Graph) AddNode(node *Node) *Graph {
n.Nodes = append(n.Nodes, node)
return n
}
// WithProperties - add a Properties to the Graph
func (p *Graph) WithProperties(properties *PropertyBag) *Graph {
p.Properties = properties
return p
}
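
A minimal usage sketch (illustrative only; the helper name and node IDs are invented; edges are omitted because the Edge builder is defined elsewhere in this change):

```go
package sarif

// exampleGraph is an illustrative sketch: a two-node graph with a description.
func exampleGraph() *Graph {
	return NewGraph().
		WithDescription(NewMessage().WithText("call graph (sketch)")).
		AddNode(NewNode().WithID("caller")).
		AddNode(NewNode().WithID("callee"))
}
```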


@ -0,0 +1,94 @@
package sarif
// GraphTraversal - Represents a path through a graph.
type GraphTraversal struct {
// Values of relevant expressions at the start of the graph traversal that may change during graph traversal.
InitialState map[string]MultiformatMessageString `json:"initialState,omitempty"`
// Values of relevant expressions at the start of the graph traversal that remain constant for the graph traversal.
ImmutableState map[string]MultiformatMessageString `json:"immutableState,omitempty"`
// A description of this graph traversal.
Description *Message `json:"description,omitempty"`
// The sequences of edges traversed by this graph traversal.
EdgeTraversals []*EdgeTraversal `json:"edgeTraversals"`
// Key/value pairs that provide additional information about the graph traversal.
Properties *PropertyBag `json:"properties,omitempty"`
// The index within the result.graphs to be associated with the result.
ResultGraphIndex int `json:"resultGraphIndex"`
// The index within the run.graphs to be associated with the result.
RunGraphIndex int `json:"runGraphIndex"`
}
// NewGraphTraversal - creates a new GraphTraversal
func NewGraphTraversal() *GraphTraversal {
return &GraphTraversal{
EdgeTraversals: make([]*EdgeTraversal, 0),
ResultGraphIndex: -1,
RunGraphIndex: -1,
}
}
// AddInitialState - add a single InitialState to the GraphTraversal
func (i *GraphTraversal) AddInitialState(key string, initialState MultiformatMessageString) *GraphTraversal {
// lazily initialise the map so this is safe on a zero value
if i.InitialState == nil {
i.InitialState = make(map[string]MultiformatMessageString)
}
i.InitialState[key] = initialState
return i
}
// WithInitialState - add a InitialState to the GraphTraversal
func (i *GraphTraversal) WithInitialState(initialState map[string]MultiformatMessageString) *GraphTraversal {
i.InitialState = initialState
return i
}
// AddImmutableState - add a single ImmutableState to the GraphTraversal
func (i *GraphTraversal) AddImmutableState(key string, immutableState MultiformatMessageString) *GraphTraversal {
// lazily initialise the map so this is safe on a zero value
if i.ImmutableState == nil {
i.ImmutableState = make(map[string]MultiformatMessageString)
}
i.ImmutableState[key] = immutableState
return i
}
// WithImmutableState - add a ImmutableState to the GraphTraversal
func (i *GraphTraversal) WithImmutableState(immutableState map[string]MultiformatMessageString) *GraphTraversal {
i.ImmutableState = immutableState
return i
}
// WithDescription - add a Description to the GraphTraversal
func (d *GraphTraversal) WithDescription(description *Message) *GraphTraversal {
d.Description = description
return d
}
// WithEdgeTraversals - add a EdgeTraversals to the GraphTraversal
func (e *GraphTraversal) WithEdgeTraversals(edgeTraversals []*EdgeTraversal) *GraphTraversal {
e.EdgeTraversals = edgeTraversals
return e
}
// AddEdgeTraversal - add a single EdgeTraversal to the GraphTraversal
func (e *GraphTraversal) AddEdgeTraversal(edgeTraversal *EdgeTraversal) *GraphTraversal {
e.EdgeTraversals = append(e.EdgeTraversals, edgeTraversal)
return e
}
// WithProperties - add a Properties to the GraphTraversal
func (p *GraphTraversal) WithProperties(properties *PropertyBag) *GraphTraversal {
p.Properties = properties
return p
}
// WithResultGraphIndex - add a ResultGraphIndex to the GraphTraversal
func (r *GraphTraversal) WithResultGraphIndex(resultGraphIndex int) *GraphTraversal {
r.ResultGraphIndex = resultGraphIndex
return r
}
// WithRunGraphIndex - add a RunGraphIndex to the GraphTraversal
func (r *GraphTraversal) WithRunGraphIndex(runGraphIndex int) *GraphTraversal {
r.RunGraphIndex = runGraphIndex
return r
}


@ -0,0 +1,292 @@
package sarif
// Invocation - The runtime environment of the analysis tool run.
type Invocation struct {
// The environment variables associated with the analysis tool process, expressed as key/value pairs.
EnvironmentVariables map[string]string `json:"environmentVariables,omitempty"`
// The account under which the invocation occurred.
Account *string `json:"account,omitempty"`
// An array of strings, containing in order the command line arguments passed to the tool from the operating system.
Arguments []string `json:"arguments,omitempty"`
// The command line used to invoke the tool.
CommandLine *string `json:"commandLine,omitempty"`
// The Coordinated Universal Time (UTC) date and time at which the invocation ended. See "Date/time properties" in the SARIF spec for the required format.
EndTimeUtc *string `json:"endTimeUtc,omitempty"`
// An absolute URI specifying the location of the executable that was invoked.
ExecutableLocation *ArtifactLocation `json:"executableLocation,omitempty"`
// Specifies whether the tool's execution completed successfully.
ExecutionSuccessful *bool `json:"executionSuccessful,omitempty"`
// The process exit code.
ExitCode *int `json:"exitCode,omitempty"`
// The reason for the process exit.
ExitCodeDescription *string `json:"exitCodeDescription,omitempty"`
// The name of the signal that caused the process to exit.
ExitSignalName *string `json:"exitSignalName,omitempty"`
// The numeric value of the signal that caused the process to exit.
ExitSignalNumber *int `json:"exitSignalNumber,omitempty"`
// The machine on which the invocation occurred.
Machine *string `json:"machine,omitempty"`
// An array of configurationOverride objects that describe notification-related runtime overrides.
NotificationConfigurationOverrides []*ConfigurationOverride `json:"notificationConfigurationOverrides"`
// The id of the process in which the invocation occurred.
ProcessID *int `json:"processId,omitempty"`
// The reason given by the operating system that the process failed to start.
ProcessStartFailureMessage *string `json:"processStartFailureMessage,omitempty"`
// Key/value pairs that provide additional information about the invocation.
Properties *PropertyBag `json:"properties,omitempty"`
// The locations of any response files specified on the tool's command line.
ResponseFiles []*ArtifactLocation `json:"responseFiles,omitempty"`
// An array of configurationOverride objects that describe rule-related runtime overrides.
RuleConfigurationOverrides []*ConfigurationOverride `json:"ruleConfigurationOverrides"`
// The Coordinated Universal Time (UTC) date and time at which the invocation started. See "Date/time properties" in the SARIF spec for the required format.
StartTimeUtc *string `json:"startTimeUtc,omitempty"`
// A file containing the standard error stream from the process that was invoked.
Stderr *ArtifactLocation `json:"stderr,omitempty"`
// A file containing the standard input stream to the process that was invoked.
Stdin *ArtifactLocation `json:"stdin,omitempty"`
// A file containing the standard output stream from the process that was invoked.
Stdout *ArtifactLocation `json:"stdout,omitempty"`
// A file containing the interleaved standard output and standard error stream from the process that was invoked.
StdoutStderr *ArtifactLocation `json:"stdoutStderr,omitempty"`
// A list of conditions detected by the tool that are relevant to the tool's configuration.
ToolConfigurationNotifications []*Notification `json:"toolConfigurationNotifications"`
// A list of runtime conditions detected by the tool during the analysis.
ToolExecutionNotifications []*Notification `json:"toolExecutionNotifications"`
// The working directory for the invocation.
WorkingDirectory *ArtifactLocation `json:"workingDirectory,omitempty"`
}
// NewInvocation - creates a new Invocation
func NewInvocation() *Invocation {
return &Invocation{
Arguments: make([]string, 0),
NotificationConfigurationOverrides: make([]*ConfigurationOverride, 0),
ResponseFiles: make([]*ArtifactLocation, 0),
RuleConfigurationOverrides: make([]*ConfigurationOverride, 0),
ToolConfigurationNotifications: make([]*Notification, 0),
ToolExecutionNotifications: make([]*Notification, 0),
}
}
// AddEnvironmentVariable - add a single EnvironmentVariable to the Invocation
func (e *Invocation) AddEnvironmentVariable(key, environmentVariable string) *Invocation {
// lazily initialise the map so this is safe on a zero value
if e.EnvironmentVariables == nil {
e.EnvironmentVariables = make(map[string]string)
}
e.EnvironmentVariables[key] = environmentVariable
return e
}
// WithEnvironmentVariables - add a EnvironmentVariables to the Invocation
func (e *Invocation) WithEnvironmentVariables(environmentVariables map[string]string) *Invocation {
e.EnvironmentVariables = environmentVariables
return e
}
// WithAccount - add a Account to the Invocation
func (a *Invocation) WithAccount(account string) *Invocation {
a.Account = &account
return a
}
// WithArguments - add a Arguments to the Invocation
func (a *Invocation) WithArguments(arguments []string) *Invocation {
a.Arguments = arguments
return a
}
// AddArgument - add a single Argument to the Invocation
func (a *Invocation) AddArgument(argument string) *Invocation {
a.Arguments = append(a.Arguments, argument)
return a
}
// WithCommandLine - add a CommandLine to the Invocation
func (c *Invocation) WithCommandLine(commandLine string) *Invocation {
c.CommandLine = &commandLine
return c
}
// WithEndTimeUtc - add a EndTimeUtc to the Invocation
func (e *Invocation) WithEndTimeUtc(endTimeUtc string) *Invocation {
e.EndTimeUtc = &endTimeUtc
return e
}
// WithExecutableLocation - add a ExecutableLocation to the Invocation
func (e *Invocation) WithExecutableLocation(executableLocation *ArtifactLocation) *Invocation {
e.ExecutableLocation = executableLocation
return e
}
// WithExecutionSuccessful - add a ExecutionSuccessful to the Invocation
func (e *Invocation) WithExecutionSuccessful(executionSuccessful bool) *Invocation {
e.ExecutionSuccessful = &executionSuccessful
return e
}
// WithExitCode - add a ExitCode to the Invocation
func (e *Invocation) WithExitCode(exitCode int) *Invocation {
e.ExitCode = &exitCode
return e
}
// WithExitCodeDescription - add a ExitCodeDescription to the Invocation
func (e *Invocation) WithExitCodeDescription(exitCodeDescription string) *Invocation {
e.ExitCodeDescription = &exitCodeDescription
return e
}
// WithExitSignalName - add a ExitSignalName to the Invocation
func (e *Invocation) WithExitSignalName(exitSignalName string) *Invocation {
e.ExitSignalName = &exitSignalName
return e
}
// WithExitSignalNumber - add a ExitSignalNumber to the Invocation
func (e *Invocation) WithExitSignalNumber(exitSignalNumber int) *Invocation {
e.ExitSignalNumber = &exitSignalNumber
return e
}
// WithMachine - add a Machine to the Invocation
func (m *Invocation) WithMachine(machine string) *Invocation {
m.Machine = &machine
return m
}
// WithNotificationConfigurationOverrides - add a NotificationConfigurationOverrides to the Invocation
func (n *Invocation) WithNotificationConfigurationOverrides(notificationConfigurationOverrides []*ConfigurationOverride) *Invocation {
n.NotificationConfigurationOverrides = notificationConfigurationOverrides
return n
}
// AddNotificationConfigurationOverride - add a single NotificationConfigurationOverride to the Invocation
func (n *Invocation) AddNotificationConfigurationOverride(notificationConfigurationOverride *ConfigurationOverride) *Invocation {
n.NotificationConfigurationOverrides = append(n.NotificationConfigurationOverrides, notificationConfigurationOverride)
return n
}
// WithProcessID - add a ProcessID to the Invocation
func (p *Invocation) WithProcessID(processId int) *Invocation {
p.ProcessID = &processId
return p
}
// WithProcessStartFailureMessage - add a ProcessStartFailureMessage to the Invocation
func (p *Invocation) WithProcessStartFailureMessage(processStartFailureMessage string) *Invocation {
p.ProcessStartFailureMessage = &processStartFailureMessage
return p
}
// WithProperties - add a Properties to the Invocation
func (p *Invocation) WithProperties(properties *PropertyBag) *Invocation {
p.Properties = properties
return p
}
// WithResponseFiles - add a ResponseFiles to the Invocation
func (r *Invocation) WithResponseFiles(responseFiles []*ArtifactLocation) *Invocation {
r.ResponseFiles = responseFiles
return r
}
// AddResponseFile - add a single ResponseFile to the Invocation
func (r *Invocation) AddResponseFile(responseFile *ArtifactLocation) *Invocation {
r.ResponseFiles = append(r.ResponseFiles, responseFile)
return r
}
// WithRuleConfigurationOverrides - add a RuleConfigurationOverrides to the Invocation
func (r *Invocation) WithRuleConfigurationOverrides(ruleConfigurationOverrides []*ConfigurationOverride) *Invocation {
r.RuleConfigurationOverrides = ruleConfigurationOverrides
return r
}
// AddRuleConfigurationOverride - add a single RuleConfigurationOverride to the Invocation
func (r *Invocation) AddRuleConfigurationOverride(ruleConfigurationOverride *ConfigurationOverride) *Invocation {
r.RuleConfigurationOverrides = append(r.RuleConfigurationOverrides, ruleConfigurationOverride)
return r
}
// WithStartTimeUtc - add a StartTimeUtc to the Invocation
func (s *Invocation) WithStartTimeUtc(startTimeUtc string) *Invocation {
s.StartTimeUtc = &startTimeUtc
return s
}
// WithStderr - add a Stderr to the Invocation
func (s *Invocation) WithStderr(stderr *ArtifactLocation) *Invocation {
s.Stderr = stderr
return s
}
// WithStdin - add a Stdin to the Invocation
func (s *Invocation) WithStdin(stdin *ArtifactLocation) *Invocation {
s.Stdin = stdin
return s
}
// WithStdout - add a Stdout to the Invocation
func (s *Invocation) WithStdout(stdout *ArtifactLocation) *Invocation {
s.Stdout = stdout
return s
}
// WithStdoutStderr - add a StdoutStderr to the Invocation
func (s *Invocation) WithStdoutStderr(stdoutStderr *ArtifactLocation) *Invocation {
s.StdoutStderr = stdoutStderr
return s
}
// WithToolConfigurationNotifications - add a ToolConfigurationNotifications to the Invocation
func (t *Invocation) WithToolConfigurationNotifications(toolConfigurationNotifications []*Notification) *Invocation {
t.ToolConfigurationNotifications = toolConfigurationNotifications
return t
}
// AddToolConfigurationNotification - add a single ToolConfigurationNotification to the Invocation
func (t *Invocation) AddToolConfigurationNotification(toolConfigurationNotification *Notification) *Invocation {
t.ToolConfigurationNotifications = append(t.ToolConfigurationNotifications, toolConfigurationNotification)
return t
}
// WithToolExecutionNotifications - add a ToolExecutionNotifications to the Invocation
func (t *Invocation) WithToolExecutionNotifications(toolExecutionNotifications []*Notification) *Invocation {
t.ToolExecutionNotifications = toolExecutionNotifications
return t
}
// AddToolExecutionNotification - add a single ToolExecutionNotification to the Invocation
func (t *Invocation) AddToolExecutionNotification(toolExecutionNotification *Notification) *Invocation {
t.ToolExecutionNotifications = append(t.ToolExecutionNotifications, toolExecutionNotification)
return t
}
// WithWorkingDirectory - add a WorkingDirectory to the Invocation
func (w *Invocation) WithWorkingDirectory(workingDirectory *ArtifactLocation) *Invocation {
w.WorkingDirectory = workingDirectory
return w
}
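
A minimal usage sketch (illustrative only; the helper name, command line and timestamps are invented, with times in the RFC 3339 form the SARIF spec expects):

```go
package sarif

// exampleInvocation is an illustrative sketch of recording how the tool was run.
func exampleInvocation() *Invocation {
	return NewInvocation().
		WithCommandLine("kubeconform -output sarif manifests/").
		WithStartTimeUtc("2025-11-04T09:00:00Z").
		WithEndTimeUtc("2025-11-04T09:00:03Z").
		WithExecutionSuccessful(true).
		WithExitCode(0)
}
```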


@ -0,0 +1,95 @@
package sarif
// Location - A location within a programming artifact.
type Location struct {
// A set of regions relevant to the location.
Annotations []*Region `json:"annotations"`
// Value that distinguishes this location from all other locations within a single result object.
ID int `json:"id"`
// The logical locations associated with the result.
LogicalLocations []*LogicalLocation `json:"logicalLocations"`
// A message relevant to the location.
Message *Message `json:"message,omitempty"`
// Identifies the artifact and region.
PhysicalLocation *PhysicalLocation `json:"physicalLocation,omitempty"`
// Key/value pairs that provide additional information about the location.
Properties *PropertyBag `json:"properties,omitempty"`
// An array of objects that describe relationships between this location and others.
Relationships []*LocationRelationship `json:"relationships"`
}
// NewLocation - creates a new Location
func NewLocation() *Location {
return &Location{
Annotations: make([]*Region, 0),
ID: -1,
LogicalLocations: make([]*LogicalLocation, 0),
Relationships: make([]*LocationRelationship, 0),
}
}
// WithAnnotations - add a Annotations to the Location
func (a *Location) WithAnnotations(annotations []*Region) *Location {
a.Annotations = annotations
return a
}
// AddAnnotation - add a single Annotation to the Location
func (a *Location) AddAnnotation(annotation *Region) *Location {
a.Annotations = append(a.Annotations, annotation)
return a
}
// WithID - add a ID to the Location
func (i *Location) WithID(id int) *Location {
i.ID = id
return i
}
// WithLogicalLocations - add a LogicalLocations to the Location
func (l *Location) WithLogicalLocations(logicalLocations []*LogicalLocation) *Location {
l.LogicalLocations = logicalLocations
return l
}
// AddLogicalLocation - add a single LogicalLocation to the Location
func (l *Location) AddLogicalLocation(logicalLocation *LogicalLocation) *Location {
l.LogicalLocations = append(l.LogicalLocations, logicalLocation)
return l
}
// WithMessage - add a Message to the Location
func (m *Location) WithMessage(message *Message) *Location {
m.Message = message
return m
}
// WithPhysicalLocation - add a PhysicalLocation to the Location
func (p *Location) WithPhysicalLocation(physicalLocation *PhysicalLocation) *Location {
p.PhysicalLocation = physicalLocation
return p
}
// WithProperties - add a Properties to the Location
func (p *Location) WithProperties(properties *PropertyBag) *Location {
p.Properties = properties
return p
}
// WithRelationships - add a Relationships to the Location
func (r *Location) WithRelationships(relationships []*LocationRelationship) *Location {
r.Relationships = relationships
return r
}
// AddRelationship - add a single Relationship to the Location
func (r *Location) AddRelationship(relationship *LocationRelationship) *Location {
r.Relationships = append(r.Relationships, relationship)
return r
}


@ -0,0 +1,6 @@
package sarif
// NewLocationWithPhysicalLocation creates a Location with a PhysicalLocation
func NewLocationWithPhysicalLocation(physicalLocation *PhysicalLocation) *Location {
return NewLocation().WithPhysicalLocation(physicalLocation)
}


@ -0,0 +1,53 @@
package sarif
// LocationRelationship - Information about the relation of one location to another.
type LocationRelationship struct {
// A description of the location relationship.
Description *Message `json:"description,omitempty"`
// A set of distinct strings that categorize the relationship. Well-known kinds include 'includes', 'isIncludedBy' and 'relevant'.
Kinds []string `json:"kinds"`
// Key/value pairs that provide additional information about the location relationship.
Properties *PropertyBag `json:"properties,omitempty"`
// A reference to the related location.
Target *int `json:"target,omitempty"`
}
// NewLocationRelationship - creates a new LocationRelationship
func NewLocationRelationship() *LocationRelationship {
return &LocationRelationship{
Kinds: []string{"relevant"},
}
}
// WithDescription - add a Description to the LocationRelationship
func (d *LocationRelationship) WithDescription(description *Message) *LocationRelationship {
d.Description = description
return d
}
// WithKinds - add a Kinds to the LocationRelationship
func (k *LocationRelationship) WithKinds(kinds []string) *LocationRelationship {
k.Kinds = kinds
return k
}
// AddKind - add a single Kind to the LocationRelationship
func (k *LocationRelationship) AddKind(kind string) *LocationRelationship {
k.Kinds = append(k.Kinds, kind)
return k
}
// WithProperties - add a Properties to the LocationRelationship
func (p *LocationRelationship) WithProperties(properties *PropertyBag) *LocationRelationship {
p.Properties = properties
return p
}
// WithTarget - add a Target to the LocationRelationship
func (t *LocationRelationship) WithTarget(target int) *LocationRelationship {
t.Target = &target
return t
}


@ -0,0 +1,75 @@
package sarif
// LogicalLocation - A logical location of a construct that produced a result.
type LogicalLocation struct {
// The machine-readable name for the logical location, such as a mangled function name provided by a C++ compiler that encodes calling convention, return type and other details along with the function name.
DecoratedName *string `json:"decoratedName,omitempty"`
// The human-readable fully qualified name of the logical location.
FullyQualifiedName *string `json:"fullyQualifiedName,omitempty"`
// The index within the logical locations array.
Index int `json:"index"`
// The type of construct this logical location component refers to. Should be one of 'function', 'member', 'module', 'namespace', 'parameter', 'resource', 'returnType', 'type', 'variable', 'object', 'array', 'property', 'value', 'element', 'text', 'attribute', 'comment', 'declaration', 'dtd' or 'processingInstruction', if any of those accurately describe the construct.
Kind *string `json:"kind,omitempty"`
// Identifies the construct in which the result occurred. For example, this property might contain the name of a class or a method.
Name *string `json:"name,omitempty"`
// Identifies the index of the immediate parent of the construct in which the result was detected. For example, this property might point to a logical location that represents the namespace that holds a type.
ParentIndex int `json:"parentIndex"`
// Key/value pairs that provide additional information about the logical location.
Properties *PropertyBag `json:"properties,omitempty"`
}
// NewLogicalLocation - creates a new LogicalLocation
func NewLogicalLocation() *LogicalLocation {
return &LogicalLocation{
Index: -1,
ParentIndex: -1,
}
}
// WithDecoratedName - add a DecoratedName to the LogicalLocation
func (d *LogicalLocation) WithDecoratedName(decoratedName string) *LogicalLocation {
d.DecoratedName = &decoratedName
return d
}
// WithFullyQualifiedName - add a FullyQualifiedName to the LogicalLocation
func (f *LogicalLocation) WithFullyQualifiedName(fullyQualifiedName string) *LogicalLocation {
f.FullyQualifiedName = &fullyQualifiedName
return f
}
// WithIndex - add a Index to the LogicalLocation
func (i *LogicalLocation) WithIndex(index int) *LogicalLocation {
i.Index = index
return i
}
// WithKind - add a Kind to the LogicalLocation
func (k *LogicalLocation) WithKind(kind string) *LogicalLocation {
k.Kind = &kind
return k
}
// WithName - add a Name to the LogicalLocation
func (n *LogicalLocation) WithName(name string) *LogicalLocation {
n.Name = &name
return n
}
// WithParentIndex - add a ParentIndex to the LogicalLocation
func (p *LogicalLocation) WithParentIndex(parentIndex int) *LogicalLocation {
p.ParentIndex = parentIndex
return p
}
// WithProperties - add a Properties to the LogicalLocation
func (p *LogicalLocation) WithProperties(properties *PropertyBag) *LogicalLocation {
p.Properties = properties
return p
}


@ -0,0 +1,62 @@
package sarif
// Message - Encapsulates a message intended to be read by the end user.
type Message struct {
// An array of strings to substitute into the message string.
Arguments []string `json:"arguments"`
// The identifier for this message.
ID *string `json:"id,omitempty"`
// A Markdown message string.
Markdown *string `json:"markdown,omitempty"`
// Key/value pairs that provide additional information about the message.
Properties *PropertyBag `json:"properties,omitempty"`
// A plain text message string.
Text *string `json:"text,omitempty"`
}
// NewMessage - creates a new Message
func NewMessage() *Message {
return &Message{
Arguments: make([]string, 0),
}
}
// WithArguments - add a Arguments to the Message
func (a *Message) WithArguments(arguments []string) *Message {
a.Arguments = arguments
return a
}
// AddArgument - add a single Argument to the Message
func (a *Message) AddArgument(argument string) *Message {
a.Arguments = append(a.Arguments, argument)
return a
}
// WithID - add a ID to the Message
func (i *Message) WithID(id string) *Message {
i.ID = &id
return i
}
// WithMarkdown - add a Markdown to the Message
func (m *Message) WithMarkdown(markdown string) *Message {
m.Markdown = &markdown
return m
}
// WithProperties - add a Properties to the Message
func (p *Message) WithProperties(properties *PropertyBag) *Message {
p.Properties = properties
return p
}
// WithText - add a Text to the Message
func (t *Message) WithText(text string) *Message {
t.Text = &text
return t
}


@ -0,0 +1,6 @@
package sarif
// NewTextMessage creates a simple text message
func NewTextMessage(text string) *Message {
return NewMessage().WithText(text)
}
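
A minimal usage sketch (illustrative only; the helper name and message strings are invented) contrasting the NewTextMessage shorthand with the full builder carrying both text and Markdown:

```go
package sarif

// exampleMessages is an illustrative sketch of the two ways to build a Message.
func exampleMessages() []*Message {
	plain := NewTextMessage("resource is invalid")
	rich := NewMessage().
		WithText("resource is invalid").
		WithMarkdown("**resource** is invalid")
	return []*Message{plain, rich}
}
```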


@ -0,0 +1,36 @@
package sarif
// MultiformatMessageString - A message string or message format string rendered in multiple formats.
type MultiformatMessageString struct {
// A Markdown message string or format string.
Markdown *string `json:"markdown,omitempty"`
// Key/value pairs that provide additional information about the message.
Properties *PropertyBag `json:"properties,omitempty"`
// A plain text message string or format string.
Text *string `json:"text,omitempty"`
}
// NewMultiformatMessageString - creates a new MultiformatMessageString
func NewMultiformatMessageString() *MultiformatMessageString {
return &MultiformatMessageString{}
}
// WithMarkdown - add a Markdown to the MultiformatMessageString
func (m *MultiformatMessageString) WithMarkdown(markdown string) *MultiformatMessageString {
m.Markdown = &markdown
return m
}
// WithProperties - add a Properties to the MultiformatMessageString
func (p *MultiformatMessageString) WithProperties(properties *PropertyBag) *MultiformatMessageString {
p.Properties = properties
return p
}
// WithText - add a Text to the MultiformatMessageString
func (t *MultiformatMessageString) WithText(text string) *MultiformatMessageString {
t.Text = &text
return t
}


@ -0,0 +1,62 @@
package sarif
// Node - Represents a node in a graph.
type Node struct {
// Array of child nodes.
Children []*Node `json:"children"`
// A string that uniquely identifies the node within its graph.
ID *string `json:"id,omitempty"`
// A short description of the node.
Label *Message `json:"label,omitempty"`
// A code location associated with the node.
Location *Location `json:"location,omitempty"`
// Key/value pairs that provide additional information about the node.
Properties *PropertyBag `json:"properties,omitempty"`
}
// NewNode - creates a new Node
func NewNode() *Node {
return &Node{
Children: make([]*Node, 0),
}
}
// WithChildren - add a Children to the Node
func (c *Node) WithChildren(children []*Node) *Node {
c.Children = children
return c
}
// AddChildren - add a single Children to the Node
func (c *Node) AddChildren(children *Node) *Node {
c.Children = append(c.Children, children)
return c
}
// WithID - add a ID to the Node
func (i *Node) WithID(id string) *Node {
i.ID = &id
return i
}
// WithLabel - add a Label to the Node
func (l *Node) WithLabel(label *Message) *Node {
l.Label = label
return l
}
// WithLocation - add a Location to the Node
func (l *Node) WithLocation(location *Location) *Node {
l.Location = location
return l
}
// WithProperties - add a Properties to the Node
func (p *Node) WithProperties(properties *PropertyBag) *Node {
p.Properties = properties
return p
}


@ -0,0 +1,99 @@
package sarif
// Notification - Describes a condition relevant to the tool itself, as opposed to being relevant to a target being analyzed by the tool.
type Notification struct {
// A reference used to locate the rule descriptor associated with this notification.
AssociatedRule *ReportingDescriptorReference `json:"associatedRule,omitempty"`
// A reference used to locate the descriptor relevant to this notification.
Descriptor *ReportingDescriptorReference `json:"descriptor,omitempty"`
// The runtime exception, if any, relevant to this notification.
Exception *Exception `json:"exception,omitempty"`
// A value specifying the severity level of the notification.
Level string `json:"level"`
// The locations relevant to this notification.
Locations []*Location `json:"locations"`
// A message that describes the condition that was encountered.
Message *Message `json:"message,omitempty"`
// Key/value pairs that provide additional information about the notification.
Properties *PropertyBag `json:"properties,omitempty"`
// The thread identifier of the code that generated the notification.
ThreadID *int `json:"threadId,omitempty"`
// The Coordinated Universal Time (UTC) date and time at which the analysis tool generated the notification.
TimeUtc *string `json:"timeUtc,omitempty"`
}
// NewNotification - creates a new Notification
func NewNotification() *Notification {
return &Notification{
Level: "warning",
Locations: make([]*Location, 0),
}
}
// WithAssociatedRule - add a AssociatedRule to the Notification
func (a *Notification) WithAssociatedRule(associatedRule *ReportingDescriptorReference) *Notification {
a.AssociatedRule = associatedRule
return a
}
// WithDescriptor - add a Descriptor to the Notification
func (d *Notification) WithDescriptor(descriptor *ReportingDescriptorReference) *Notification {
d.Descriptor = descriptor
return d
}
// WithException - add a Exception to the Notification
func (e *Notification) WithException(exception *Exception) *Notification {
e.Exception = exception
return e
}
// WithLevel - add a Level to the Notification
func (l *Notification) WithLevel(level string) *Notification {
l.Level = level
return l
}
// WithLocations - add a Locations to the Notification
func (l *Notification) WithLocations(locations []*Location) *Notification {
l.Locations = locations
return l
}
// AddLocation - add a single Location to the Notification
func (l *Notification) AddLocation(location *Location) *Notification {
l.Locations = append(l.Locations, location)
return l
}
// WithMessage - add a Message to the Notification
func (m *Notification) WithMessage(message *Message) *Notification {
m.Message = message
return m
}
// WithProperties - add a Properties to the Notification
func (p *Notification) WithProperties(properties *PropertyBag) *Notification {
p.Properties = properties
return p
}
// WithThreadID - add a ThreadID to the Notification
func (t *Notification) WithThreadID(threadId int) *Notification {
t.ThreadID = &threadId
return t
}
// WithTimeUtc - add a TimeUtc to the Notification
func (t *Notification) WithTimeUtc(timeUtc string) *Notification {
t.TimeUtc = &timeUtc
return t
}
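
A minimal usage sketch (illustrative only; the helper name and message text are invented). It overrides the "warning" default set by NewNotification:

```go
package sarif

// exampleNotification is an illustrative sketch of an error-level notification.
func exampleNotification() *Notification {
	return NewNotification().
		WithLevel("error").
		WithMessage(NewTextMessage("schema registry was unreachable"))
}
```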


@ -0,0 +1,54 @@
package sarif
// PhysicalLocation - A physical location relevant to a result. Specifies a reference to a programming artifact together with a range of bytes or characters within that artifact.
type PhysicalLocation struct {
// The address of the location.
Address *Address `json:"address,omitempty"`
// The location of the artifact.
ArtifactLocation *ArtifactLocation `json:"artifactLocation,omitempty"`
// Specifies a portion of the artifact that encloses the region. Allows a viewer to display additional context around the region.
ContextRegion *Region `json:"contextRegion,omitempty"`
// Key/value pairs that provide additional information about the physical location.
Properties *PropertyBag `json:"properties,omitempty"`
// Specifies a portion of the artifact.
Region *Region `json:"region,omitempty"`
}
// NewPhysicalLocation - creates a new PhysicalLocation
func NewPhysicalLocation() *PhysicalLocation {
return &PhysicalLocation{}
}
// WithAddress - add a Address to the PhysicalLocation
func (a *PhysicalLocation) WithAddress(address *Address) *PhysicalLocation {
a.Address = address
return a
}
// WithArtifactLocation - add a ArtifactLocation to the PhysicalLocation
func (a *PhysicalLocation) WithArtifactLocation(artifactLocation *ArtifactLocation) *PhysicalLocation {
a.ArtifactLocation = artifactLocation
return a
}
// WithContextRegion - add a ContextRegion to the PhysicalLocation
func (c *PhysicalLocation) WithContextRegion(contextRegion *Region) *PhysicalLocation {
c.ContextRegion = contextRegion
return c
}
// WithProperties - add a Properties to the PhysicalLocation
func (p *PhysicalLocation) WithProperties(properties *PropertyBag) *PhysicalLocation {
p.Properties = properties
return p
}
// WithRegion - add a Region to the PhysicalLocation
func (r *PhysicalLocation) WithRegion(region *Region) *PhysicalLocation {
r.Region = region
return r
}


@ -0,0 +1,3 @@
package sarif
// Properties - a free-form map of additional key/value information attached to a PropertyBag.
type Properties map[string]interface{}


@ -0,0 +1,36 @@
package sarif
// PropertyBag - Key/value pairs that provide additional information about the object.
type PropertyBag struct {
// Properties - arbitrary additional key/value properties
Properties Properties `json:"properties,omitempty"`
// A set of distinct strings that provide additional information.
Tags []string `json:"tags"`
}
// NewPropertyBag - creates a new PropertyBag
func NewPropertyBag() *PropertyBag {
return &PropertyBag{
Properties: make(Properties),
Tags: make([]string, 0),
}
}
// Add - add a key/value property to the PropertyBag
func (p *PropertyBag) Add(key string, value interface{}) *PropertyBag {
p.Properties[key] = value
return p
}
// WithTags - add a Tags to the PropertyBag
func (t *PropertyBag) WithTags(tags []string) *PropertyBag {
t.Tags = tags
return t
}
// AddTag - add a single Tag to the PropertyBag
func (t *PropertyBag) AddTag(tag string) *PropertyBag {
t.Tags = append(t.Tags, tag)
return t
}


@ -0,0 +1,36 @@
package sarif
import "encoding/json"
// MarshalJSON - custom JSON marshaller for PropertyBag
func (p PropertyBag) MarshalJSON() ([]byte, error) {
// type Alias PropertyBag
aux := make(map[string]interface{})
for k, v := range p.Properties {
aux[k] = v
}
if len(p.Tags) > 0 {
aux["tags"] = p.Tags
}
return json.Marshal(aux)
}
// UnmarshalJSON - custom JSON unmarshaller for PropertyBag
func (p *PropertyBag) UnmarshalJSON(data []byte) error {
// type Alias PropertyBag
aux := struct {
Tags []string `json:"tags,omitempty"`
}{}
if err := json.Unmarshal(data, &aux); err != nil {
return err
}
p.Tags = aux.Tags
var raw map[string]interface{}
if err := json.Unmarshal(data, &raw); err != nil {
return err
}
delete(raw, "tags")
p.Properties = raw
return nil
}
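
A minimal usage sketch (illustrative only; the helper name and values are invented) showing how the custom marshaller above flattens Properties and Tags into a single JSON object:

```go
package sarif

import "encoding/json"

// examplePropertyBagJSON is an illustrative sketch; with the values below the
// marshaller produces {"kind":"CustomResourceDefinition","tags":["kubeconform"]}.
func examplePropertyBagJSON() ([]byte, error) {
	bag := NewPropertyBag().
		Add("kind", "CustomResourceDefinition").
		AddTag("kubeconform")
	return json.Marshal(bag)
}
```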


@ -0,0 +1,63 @@
package sarif
// Rectangle - An area within an image.
type Rectangle struct {
// The Y coordinate of the bottom edge of the rectangle, measured in the image's natural units.
Bottom *float64 `json:"bottom,omitempty"`
// The X coordinate of the left edge of the rectangle, measured in the image's natural units.
Left *float64 `json:"left,omitempty"`
// A message relevant to the rectangle.
Message *Message `json:"message,omitempty"`
// Key/value pairs that provide additional information about the rectangle.
Properties *PropertyBag `json:"properties,omitempty"`
// The X coordinate of the right edge of the rectangle, measured in the image's natural units.
Right *float64 `json:"right,omitempty"`
// The Y coordinate of the top edge of the rectangle, measured in the image's natural units.
Top *float64 `json:"top,omitempty"`
}
// NewRectangle - creates a new Rectangle
func NewRectangle() *Rectangle {
return &Rectangle{}
}
// WithBottom - add a Bottom to the Rectangle
func (b *Rectangle) WithBottom(bottom float64) *Rectangle {
b.Bottom = &bottom
return b
}
// WithLeft - add a Left to the Rectangle
func (l *Rectangle) WithLeft(left float64) *Rectangle {
l.Left = &left
return l
}
// WithMessage - add a Message to the Rectangle
func (m *Rectangle) WithMessage(message *Message) *Rectangle {
m.Message = message
return m
}
// WithProperties - add a Properties to the Rectangle
func (p *Rectangle) WithProperties(properties *PropertyBag) *Rectangle {
p.Properties = properties
return p
}
// WithRight - add a Right to the Rectangle
func (r *Rectangle) WithRight(right float64) *Rectangle {
r.Right = &right
return r
}
// WithTop - add a Top to the Rectangle
func (t *Rectangle) WithTop(top float64) *Rectangle {
t.Top = &top
return t
}


@ -0,0 +1,120 @@
package sarif
// Region - A region within an artifact where a result was detected.
type Region struct {
// The length of the region in bytes.
ByteLength *int `json:"byteLength,omitempty"`
// The zero-based offset from the beginning of the artifact of the first byte in the region.
ByteOffset int `json:"byteOffset"`
// The length of the region in characters.
CharLength *int `json:"charLength,omitempty"`
// The zero-based offset from the beginning of the artifact of the first character in the region.
CharOffset int `json:"charOffset"`
// The column number of the character following the end of the region.
EndColumn *int `json:"endColumn,omitempty"`
// The line number of the last character in the region.
EndLine *int `json:"endLine,omitempty"`
// A message relevant to the region.
Message *Message `json:"message,omitempty"`
// Key/value pairs that provide additional information about the region.
Properties *PropertyBag `json:"properties,omitempty"`
// The portion of the artifact contents within the specified region.
Snippet *ArtifactContent `json:"snippet,omitempty"`
// Specifies the source language, if any, of the portion of the artifact specified by the region object.
SourceLanguage *string `json:"sourceLanguage,omitempty"`
// The column number of the first character in the region.
StartColumn *int `json:"startColumn,omitempty"`
// The line number of the first character in the region.
StartLine *int `json:"startLine,omitempty"`
}
// NewRegion - creates a new Region
func NewRegion() *Region {
return &Region{
ByteOffset: -1,
CharOffset: -1,
}
}
// WithByteLength - add a ByteLength to the Region
func (b *Region) WithByteLength(byteLength int) *Region {
b.ByteLength = &byteLength
return b
}
// WithByteOffset - add a ByteOffset to the Region
func (b *Region) WithByteOffset(byteOffset int) *Region {
b.ByteOffset = byteOffset
return b
}
// WithCharLength - add a CharLength to the Region
func (c *Region) WithCharLength(charLength int) *Region {
c.CharLength = &charLength
return c
}
// WithCharOffset - add a CharOffset to the Region
func (c *Region) WithCharOffset(charOffset int) *Region {
c.CharOffset = charOffset
return c
}
// WithEndColumn - add a EndColumn to the Region
func (e *Region) WithEndColumn(endColumn int) *Region {
e.EndColumn = &endColumn
return e
}
// WithEndLine - add a EndLine to the Region
func (e *Region) WithEndLine(endLine int) *Region {
e.EndLine = &endLine
return e
}
// WithMessage - add a Message to the Region
func (m *Region) WithMessage(message *Message) *Region {
m.Message = message
return m
}
// WithProperties - add a Properties to the Region
func (p *Region) WithProperties(properties *PropertyBag) *Region {
p.Properties = properties
return p
}
// WithSnippet - add a Snippet to the Region
func (s *Region) WithSnippet(snippet *ArtifactContent) *Region {
s.Snippet = snippet
return s
}
// WithSourceLanguage - add a SourceLanguage to the Region
func (s *Region) WithSourceLanguage(sourceLanguage string) *Region {
s.SourceLanguage = &sourceLanguage
return s
}
// WithStartColumn - add a StartColumn to the Region
func (s *Region) WithStartColumn(startColumn int) *Region {
s.StartColumn = &startColumn
return s
}
// WithStartLine - add a StartLine to the Region
func (s *Region) WithStartLine(startLine int) *Region {
s.StartLine = &startLine
return s
}


@ -0,0 +1,8 @@
package sarif
// NewSimpleRegion creates a new Region with the start and end line
func NewSimpleRegion(startLine, endLine int) *Region {
return NewRegion().
WithStartLine(startLine).
WithEndLine(endLine)
}
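
A minimal usage sketch (illustrative only; the helper name and line numbers are invented) tying together the Location, PhysicalLocation and Region builders above; the ArtifactLocation is taken as a parameter because its builder is defined elsewhere in this change:

```go
package sarif

// exampleLocation is an illustrative sketch of a location covering lines 10-12
// of a caller-supplied artifact.
func exampleLocation(artifact *ArtifactLocation) *Location {
	return NewLocationWithPhysicalLocation(
		NewPhysicalLocation().
			WithArtifactLocation(artifact).
			WithRegion(NewSimpleRegion(10, 12)),
	)
}
```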


@ -0,0 +1,36 @@
package sarif
// Replacement - The replacement of a single region of an artifact.
type Replacement struct {
// The region of the artifact to delete.
DeletedRegion *Region `json:"deletedRegion,omitempty"`
// The content to insert at the location specified by the 'deletedRegion' property.
InsertedContent *ArtifactContent `json:"insertedContent,omitempty"`
// Key/value pairs that provide additional information about the replacement.
Properties *PropertyBag `json:"properties,omitempty"`
}
// NewReplacement - creates a new Replacement
func NewReplacement() *Replacement {
return &Replacement{}
}
// WithDeletedRegion - add a DeletedRegion to the Replacement
func (d *Replacement) WithDeletedRegion(deletedRegion *Region) *Replacement {
d.DeletedRegion = deletedRegion
return d
}
// WithInsertedContent - add a InsertedContent to the Replacement
func (i *Replacement) WithInsertedContent(insertedContent *ArtifactContent) *Replacement {
i.InsertedContent = insertedContent
return i
}
// WithProperties - add a Properties to the Replacement
func (p *Replacement) WithProperties(properties *PropertyBag) *Replacement {
p.Properties = properties
return p
}


@ -0,0 +1,58 @@
package sarif
// ReportingConfiguration - Information about a rule or notification that can be configured at runtime.
type ReportingConfiguration struct {
// Specifies whether the report may be produced during the scan.
Enabled bool `json:"enabled"`
// Specifies the failure level for the report.
Level string `json:"level,omitempty"`
// Contains configuration information specific to a report.
Parameters *PropertyBag `json:"parameters,omitempty"`
// Key/value pairs that provide additional information about the reporting configuration.
Properties *PropertyBag `json:"properties,omitempty"`
// Specifies the relative priority of the report. Used for analysis output only.
Rank float64 `json:"rank"`
}
// NewReportingConfiguration - creates a new ReportingConfiguration
func NewReportingConfiguration() *ReportingConfiguration {
return &ReportingConfiguration{
Enabled: true,
Level: "warning",
Rank: -1.000000,
}
}
// WithEnabled - add a Enabled to the ReportingConfiguration
func (e *ReportingConfiguration) WithEnabled(enabled bool) *ReportingConfiguration {
e.Enabled = enabled
return e
}
// WithLevel - add a Level to the ReportingConfiguration
func (l *ReportingConfiguration) WithLevel(level string) *ReportingConfiguration {
l.Level = level
return l
}
// WithParameters - add a Parameters to the ReportingConfiguration
func (p *ReportingConfiguration) WithParameters(parameters *PropertyBag) *ReportingConfiguration {
p.Parameters = parameters
return p
}
// WithProperties - add a Properties to the ReportingConfiguration
func (p *ReportingConfiguration) WithProperties(properties *PropertyBag) *ReportingConfiguration {
p.Properties = properties
return p
}
// WithRank - add a Rank to the ReportingConfiguration
func (r *ReportingConfiguration) WithRank(rank float64) *ReportingConfiguration {
r.Rank = rank
return r
}
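
A minimal usage sketch (illustrative only; the helper name and rank value are invented). It overrides the defaults set by NewReportingConfiguration (enabled, level "warning", rank -1):

```go
package sarif

// exampleRuleConfig is an illustrative sketch of a per-rule configuration override.
func exampleRuleConfig() *ReportingConfiguration {
	return NewReportingConfiguration().
		WithLevel("error").
		WithRank(87.5)
}
```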


@ -0,0 +1,170 @@
package sarif
// ReportingDescriptor - Metadata that describes a specific report produced by the tool, as part of the analysis it provides or its runtime reporting.
type ReportingDescriptor struct {
// A set of name/value pairs with arbitrary names. Each value is a multiformatMessageString object, which holds message strings in plain text and (optionally) Markdown format. The strings can include placeholders, which can be used to construct a message in combination with an arbitrary number of additional string arguments.
MessageStrings map[string]MultiformatMessageString `json:"messageStrings,omitempty"`
// Default reporting configuration information.
DefaultConfiguration *ReportingConfiguration `json:"defaultConfiguration,omitempty"`
// An array of unique identifiers in the form of GUIDs by which this report was known in some previous version of the analysis tool.
DeprecatedGuids []string `json:"deprecatedGuids,omitempty"`
// An array of stable, opaque identifiers by which this report was known in some previous version of the analysis tool.
DeprecatedIds []string `json:"deprecatedIds,omitempty"`
// An array of readable identifiers by which this report was known in some previous version of the analysis tool.
DeprecatedNames []string `json:"deprecatedNames,omitempty"`
// A description of the report. Should, as far as possible, provide details sufficient to enable resolution of any problem indicated by the result.
FullDescription *MultiformatMessageString `json:"fullDescription,omitempty"`
// A unique identifier for the reporting descriptor in the form of a GUID.
GuID *string `json:"guid,omitempty"`
// Provides the primary documentation for the report, useful when there is no online documentation.
Help *MultiformatMessageString `json:"help,omitempty"`
// A URI where the primary documentation for the report can be found.
HelpURI *string `json:"helpUri,omitempty"`
// A stable, opaque identifier for the report.
ID *string `json:"id,omitempty"`
// A report identifier that is understandable to an end user.
Name *string `json:"name,omitempty"`
// Key/value pairs that provide additional information about the report.
Properties *PropertyBag `json:"properties,omitempty"`
// An array of objects that describe relationships between this reporting descriptor and others.
Relationships []*ReportingDescriptorRelationship `json:"relationships"`
// A concise description of the report. Should be a single sentence that is understandable when visible space is limited to a single line of text.
ShortDescription *MultiformatMessageString `json:"shortDescription,omitempty"`
}
// NewReportingDescriptor - creates a new ReportingDescriptor
func NewReportingDescriptor() *ReportingDescriptor {
return &ReportingDescriptor{
DeprecatedGuids: make([]string, 0),
DeprecatedIds: make([]string, 0),
DeprecatedNames: make([]string, 0),
Relationships: make([]*ReportingDescriptorRelationship, 0),
}
}
// AddMessageString - add a single MessageString to the ReportingDescriptor
func (m *ReportingDescriptor) AddMessageString(key string, messageString MultiformatMessageString) *ReportingDescriptor {
if m.MessageStrings == nil {
m.MessageStrings = map[string]MultiformatMessageString{} // lazily initialise: the constructor leaves this map nil
}
m.MessageStrings[key] = messageString
return m
}
// WithMessageStrings - add a MessageStrings to the ReportingDescriptor
func (m *ReportingDescriptor) WithMessageStrings(messageStrings map[string]MultiformatMessageString) *ReportingDescriptor {
m.MessageStrings = messageStrings
return m
}
// WithDefaultConfiguration - add a DefaultConfiguration to the ReportingDescriptor
func (d *ReportingDescriptor) WithDefaultConfiguration(defaultConfiguration *ReportingConfiguration) *ReportingDescriptor {
d.DefaultConfiguration = defaultConfiguration
return d
}
// WithDeprecatedGuids - add a DeprecatedGuids to the ReportingDescriptor
func (d *ReportingDescriptor) WithDeprecatedGuids(deprecatedGuids []string) *ReportingDescriptor {
d.DeprecatedGuids = deprecatedGuids
return d
}
// AddDeprecatedGuid - add a single DeprecatedGuid to the ReportingDescriptor
func (d *ReportingDescriptor) AddDeprecatedGuid(deprecatedGuid string) *ReportingDescriptor {
d.DeprecatedGuids = append(d.DeprecatedGuids, deprecatedGuid)
return d
}
// WithDeprecatedIds - add a DeprecatedIds to the ReportingDescriptor
func (d *ReportingDescriptor) WithDeprecatedIds(deprecatedIds []string) *ReportingDescriptor {
d.DeprecatedIds = deprecatedIds
return d
}
// AddDeprecatedId - add a single DeprecatedId to the ReportingDescriptor
func (d *ReportingDescriptor) AddDeprecatedId(deprecatedId string) *ReportingDescriptor {
d.DeprecatedIds = append(d.DeprecatedIds, deprecatedId)
return d
}
// WithDeprecatedNames - add a DeprecatedNames to the ReportingDescriptor
func (d *ReportingDescriptor) WithDeprecatedNames(deprecatedNames []string) *ReportingDescriptor {
d.DeprecatedNames = deprecatedNames
return d
}
// AddDeprecatedName - add a single DeprecatedName to the ReportingDescriptor
func (d *ReportingDescriptor) AddDeprecatedName(deprecatedName string) *ReportingDescriptor {
d.DeprecatedNames = append(d.DeprecatedNames, deprecatedName)
return d
}
// WithFullDescription - add a FullDescription to the ReportingDescriptor
func (f *ReportingDescriptor) WithFullDescription(fullDescription *MultiformatMessageString) *ReportingDescriptor {
f.FullDescription = fullDescription
return f
}
// WithGuID - add a GuID to the ReportingDescriptor
func (g *ReportingDescriptor) WithGuID(guid string) *ReportingDescriptor {
g.GuID = &guid
return g
}
// WithHelp - add a Help to the ReportingDescriptor
func (h *ReportingDescriptor) WithHelp(help *MultiformatMessageString) *ReportingDescriptor {
h.Help = help
return h
}
// WithHelpURI - add a HelpURI to the ReportingDescriptor
func (h *ReportingDescriptor) WithHelpURI(helpUri string) *ReportingDescriptor {
h.HelpURI = &helpUri
return h
}
// WithID - add a ID to the ReportingDescriptor
func (i *ReportingDescriptor) WithID(id string) *ReportingDescriptor {
i.ID = &id
return i
}
// WithName - add a Name to the ReportingDescriptor
func (n *ReportingDescriptor) WithName(name string) *ReportingDescriptor {
n.Name = &name
return n
}
// WithProperties - add a Properties to the ReportingDescriptor
func (p *ReportingDescriptor) WithProperties(properties *PropertyBag) *ReportingDescriptor {
p.Properties = properties
return p
}
// WithRelationships - add a Relationships to the ReportingDescriptor
func (r *ReportingDescriptor) WithRelationships(relationships []*ReportingDescriptorRelationship) *ReportingDescriptor {
r.Relationships = relationships
return r
}
// AddRelationship - add a single Relationship to the ReportingDescriptor
func (r *ReportingDescriptor) AddRelationship(relationship *ReportingDescriptorRelationship) *ReportingDescriptor {
r.Relationships = append(r.Relationships, relationship)
return r
}
// WithShortDescription - add a ShortDescription to the ReportingDescriptor
func (s *ReportingDescriptor) WithShortDescription(shortDescription *MultiformatMessageString) *ReportingDescriptor {
s.ShortDescription = shortDescription
return s
}
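
As a hedged illustration of the builder above, the sketch below describes a hypothetical validation rule; the ID "KC0001", the name, and the help URI are placeholders, not identifiers used by this change:

```go
package sarif

// exampleReportingDescriptor shows one way the setters compose (placeholder values).
func exampleReportingDescriptor() *ReportingDescriptor {
	return NewReportingDescriptor().
		WithID("KC0001").
		WithName("SchemaValidation").
		WithShortDescription(NewMultiformatMessageString().WithText("Manifest failed schema validation")).
		WithHelpURI("https://example.com/rules/KC0001").
		WithDefaultConfiguration(NewReportingConfiguration().WithLevel("error"))
}
```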

View file

@ -0,0 +1,24 @@
package sarif
// NewRule creates a new Rule and returns a pointer to it
func NewRule(ruleID string) *ReportingDescriptor {
return NewReportingDescriptor().WithID(ruleID)
}
// WithDescription specifies short description for a rule and returns the updated rule.
// Short description should be a single sentence that is understandable when visible space is limited to a single line
// of text.
func (rule *ReportingDescriptor) WithDescription(description string) *ReportingDescriptor {
rule.ShortDescription = NewMultiformatMessageString().WithText(description)
return rule
}
// WithMarkdownHelp specifies a help text for a rule and returns the updated rule
func (rule *ReportingDescriptor) WithMarkdownHelp(markdown string) *ReportingDescriptor {
if rule.Help == nil {
rule.Help = NewMultiformatMessageString()
}
rule.Help.Text = &markdown
rule.Help.Markdown = &markdown
return rule
}
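
A short sketch of the convenience helpers above; the rule ID and the texts are placeholders:

```go
package sarif

// exampleRule builds a rule with a one-line description and Markdown help (illustrative only).
func exampleRule() *ReportingDescriptor {
	return NewRule("KC0001").
		WithDescription("Manifest failed schema validation").
		WithMarkdownHelp("See the **kubeconform** documentation for remediation steps.")
}
```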

View file

@ -0,0 +1,56 @@
package sarif
// ReportingDescriptorReference - Information about how to locate a relevant reporting descriptor.
type ReportingDescriptorReference struct {
// A guid that uniquely identifies the descriptor.
GuID *string `json:"guid,omitempty"`
// The id of the descriptor.
ID *string `json:"id,omitempty"`
// The index into an array of descriptors in toolComponent.ruleDescriptors, toolComponent.notificationDescriptors, or toolComponent.taxonomyDescriptors, depending on context.
Index int `json:"index"`
// Key/value pairs that provide additional information about the reporting descriptor reference.
Properties *PropertyBag `json:"properties,omitempty"`
// A reference used to locate the toolComponent associated with the descriptor.
ToolComponent *ToolComponentReference `json:"toolComponent,omitempty"`
}
// NewReportingDescriptorReference - creates a new ReportingDescriptorReference
func NewReportingDescriptorReference() *ReportingDescriptorReference {
return &ReportingDescriptorReference{
Index: -1,
}
}
// WithGuID - add a GuID to the ReportingDescriptorReference
func (g *ReportingDescriptorReference) WithGuID(guid string) *ReportingDescriptorReference {
g.GuID = &guid
return g
}
// WithID - add a ID to the ReportingDescriptorReference
func (i *ReportingDescriptorReference) WithID(id string) *ReportingDescriptorReference {
i.ID = &id
return i
}
// WithIndex - add a Index to the ReportingDescriptorReference
func (i *ReportingDescriptorReference) WithIndex(index int) *ReportingDescriptorReference {
i.Index = index
return i
}
// WithProperties - add a Properties to the ReportingDescriptorReference
func (p *ReportingDescriptorReference) WithProperties(properties *PropertyBag) *ReportingDescriptorReference {
p.Properties = properties
return p
}
// WithToolComponent - add a ToolComponent to the ReportingDescriptorReference
func (t *ReportingDescriptorReference) WithToolComponent(toolComponent *ToolComponentReference) *ReportingDescriptorReference {
t.ToolComponent = toolComponent
return t
}

View file

@ -0,0 +1,53 @@
package sarif
// ReportingDescriptorRelationship - Information about the relation of one reporting descriptor to another.
type ReportingDescriptorRelationship struct {
// A description of the reporting descriptor relationship.
Description *Message `json:"description,omitempty"`
// A set of distinct strings that categorize the relationship. Well-known kinds include 'canPrecede', 'canFollow', 'willPrecede', 'willFollow', 'superset', 'subset', 'equal', 'disjoint', 'relevant', and 'incomparable'.
Kinds []string `json:"kinds"`
// Key/value pairs that provide additional information about the reporting descriptor reference.
Properties *PropertyBag `json:"properties,omitempty"`
// A reference to the related reporting descriptor.
Target *ReportingDescriptorReference `json:"target,omitempty"`
}
// NewReportingDescriptorRelationship - creates a new ReportingDescriptorRelationship
func NewReportingDescriptorRelationship() *ReportingDescriptorRelationship {
return &ReportingDescriptorRelationship{
Kinds: []string{"relevant"},
}
}
// WithDescription - add a Description to the ReportingDescriptorRelationship
func (d *ReportingDescriptorRelationship) WithDescription(description *Message) *ReportingDescriptorRelationship {
d.Description = description
return d
}
// WithKinds - add a Kinds to the ReportingDescriptorRelationship
func (k *ReportingDescriptorRelationship) WithKinds(kinds []string) *ReportingDescriptorRelationship {
k.Kinds = kinds
return k
}
// AddKind - add a single Kind to the ReportingDescriptorRelationship
func (k *ReportingDescriptorRelationship) AddKind(kind string) *ReportingDescriptorRelationship {
k.Kinds = append(k.Kinds, kind)
return k
}
// WithProperties - add a Properties to the ReportingDescriptorRelationship
func (p *ReportingDescriptorRelationship) WithProperties(properties *PropertyBag) *ReportingDescriptorRelationship {
p.Properties = properties
return p
}
// WithTarget - add a Target to the ReportingDescriptorRelationship
func (t *ReportingDescriptorRelationship) WithTarget(target *ReportingDescriptorReference) *ReportingDescriptorRelationship {
t.Target = target
return t
}
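
To show how a descriptor reference and a relationship fit together, a minimal sketch; the target ID "CWE-20" is only a placeholder, and note that NewReportingDescriptorRelationship already defaults Kinds to ["relevant"]:

```go
package sarif

// exampleRelationship links a rule to a related taxonomy entry (illustrative only).
func exampleRelationship() *ReportingDescriptorRelationship {
	target := NewReportingDescriptorReference().
		WithID("CWE-20").
		WithIndex(0)
	return NewReportingDescriptorRelationship(). // Kinds already defaults to ["relevant"]
		WithTarget(target)
}
```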

View file

@ -0,0 +1,373 @@
package sarif
// Result - A result produced by an analysis tool.
type Result struct {
// A set of strings that contribute to the stable, unique identity of the result.
PartialFingerprints map[string]string `json:"partialFingerprints,omitempty"`
// A set of strings each of which individually defines a stable, unique identity for the result.
Fingerprints map[string]string `json:"fingerprints,omitempty"`
// Identifies the artifact that the analysis tool was instructed to scan. This need not be the same as the artifact where the result actually occurred.
AnalysisTarget *ArtifactLocation `json:"analysisTarget,omitempty"`
// A set of artifacts relevant to the result.
Attachments []*Attachment `json:"attachments"`
// The state of a result relative to a baseline of a previous run.
BaselineState *string `json:"baselineState,omitempty"`
// An array of 'codeFlow' objects relevant to the result.
CodeFlows []*CodeFlow `json:"codeFlows"`
// A stable, unique identifier for the equivalence class of logically identical results to which this result belongs, in the form of a GUID.
CorrelationGuID *string `json:"correlationGuid,omitempty"`
// An array of 'fix' objects, each of which represents a proposed fix to the problem indicated by the result.
Fixes []*Fix `json:"fixes"`
// An array of one or more unique 'graphTraversal' objects.
GraphTraversals []*GraphTraversal `json:"graphTraversals"`
// An array of zero or more unique graph objects associated with the result.
Graphs []*Graph `json:"graphs"`
// A stable, unique identifier for the result in the form of a GUID.
GuID *string `json:"guid,omitempty"`
// An absolute URI at which the result can be viewed.
HostedViewerURI *string `json:"hostedViewerUri,omitempty"`
// A value that categorizes results by evaluation state.
Kind string `json:"kind,omitempty"`
// A value specifying the severity level of the result.
Level string `json:"level,omitempty"`
// The set of locations where the result was detected. Specify only one location unless the problem indicated by the result can only be corrected by making a change at every specified location.
Locations []*Location `json:"locations"`
// A message that describes the result. The first sentence of the message only will be displayed when visible space is limited.
Message *Message `json:"message,omitempty"`
// A positive integer specifying the number of times this logically unique result was observed in this run.
OccurrenceCount *int `json:"occurrenceCount,omitempty"`
// Key/value pairs that provide additional information about the result.
Properties *PropertyBag `json:"properties,omitempty"`
// Information about how and when the result was detected.
Provenance *ResultProvenance `json:"provenance,omitempty"`
// A number representing the priority or importance of the result.
Rank float64 `json:"rank"`
// A set of locations relevant to this result.
RelatedLocations []*Location `json:"relatedLocations"`
// A reference used to locate the rule descriptor relevant to this result.
Rule *ReportingDescriptorReference `json:"rule,omitempty"`
// The stable, unique identifier of the rule, if any, to which this result is relevant.
RuleID *string `json:"ruleId,omitempty"`
// The index within the tool component rules array of the rule object associated with this result.
RuleIndex int `json:"ruleIndex"`
// An array of 'stack' objects relevant to the result.
Stacks []*Stack `json:"stacks"`
// A set of suppressions relevant to this result.
Suppressions []*Suppression `json:"suppressions,omitempty"`
// An array of references to taxonomy reporting descriptors that are applicable to the result.
Taxa []*ReportingDescriptorReference `json:"taxa"`
// A web request associated with this result.
WebRequest *WebRequest `json:"webRequest,omitempty"`
// A web response associated with this result.
WebResponse *WebResponse `json:"webResponse,omitempty"`
// The URIs of the work items associated with this result.
WorkItemUris []string `json:"workItemUris,omitempty"`
}
// NewResult - creates a new Result with default values
func NewResult() *Result {
return &Result{
Attachments: make([]*Attachment, 0),
CodeFlows: make([]*CodeFlow, 0),
Fixes: make([]*Fix, 0),
GraphTraversals: make([]*GraphTraversal, 0),
Graphs: make([]*Graph, 0),
Kind: "fail",
Level: "warning",
Locations: make([]*Location, 0),
Rank: -1.000000,
RelatedLocations: make([]*Location, 0),
RuleIndex: -1,
Stacks: make([]*Stack, 0),
Suppressions: make([]*Suppression, 0),
Taxa: make([]*ReportingDescriptorReference, 0),
WorkItemUris: make([]string, 0),
}
}
// AddPartialFingerprint - add a single PartialFingerprint to the Result
func (p *Result) AddPartialFingerprint(key, partialFingerprint string) *Result {
if p.PartialFingerprints == nil {
p.PartialFingerprints = map[string]string{} // lazily initialise: NewResult leaves this map nil
}
p.PartialFingerprints[key] = partialFingerprint
return p
}
// WithPartialFingerprints - add a PartialFingerprints to the Result
func (p *Result) WithPartialFingerprints(partialFingerprints map[string]string) *Result {
p.PartialFingerprints = partialFingerprints
return p
}
// AddFingerprint - add a single Fingerprint to the Result
func (f *Result) AddFingerprint(key, fingerprint string) *Result {
if f.Fingerprints == nil {
f.Fingerprints = map[string]string{} // lazily initialise: NewResult leaves this map nil
}
f.Fingerprints[key] = fingerprint
return f
}
// WithFingerprints - add a Fingerprints to the Result
func (f *Result) WithFingerprints(fingerprints map[string]string) *Result {
f.Fingerprints = fingerprints
return f
}
// WithAnalysisTarget - add a AnalysisTarget to the Result
func (a *Result) WithAnalysisTarget(analysisTarget *ArtifactLocation) *Result {
a.AnalysisTarget = analysisTarget
return a
}
// WithAttachments - add a Attachments to the Result
func (a *Result) WithAttachments(attachments []*Attachment) *Result {
a.Attachments = attachments
return a
}
// AddAttachment - add a single Attachment to the Result
func (a *Result) AddAttachment(attachment *Attachment) *Result {
a.Attachments = append(a.Attachments, attachment)
return a
}
// WithBaselineState - add a BaselineState to the Result
func (b *Result) WithBaselineState(baselineState string) *Result {
b.BaselineState = &baselineState
return b
}
// WithCodeFlows - add a CodeFlows to the Result
func (c *Result) WithCodeFlows(codeFlows []*CodeFlow) *Result {
c.CodeFlows = codeFlows
return c
}
// AddCodeFlow - add a single CodeFlow to the Result
func (c *Result) AddCodeFlow(codeFlow *CodeFlow) *Result {
c.CodeFlows = append(c.CodeFlows, codeFlow)
return c
}
// WithCorrelationGuID - add a CorrelationGuID to the Result
func (c *Result) WithCorrelationGuID(correlationGuid string) *Result {
c.CorrelationGuID = &correlationGuid
return c
}
// WithFixes - add a Fixes to the Result
func (f *Result) WithFixes(fixes []*Fix) *Result {
f.Fixes = fixes
return f
}
// AddFixe - add a single Fixe to the Result
func (f *Result) AddFixe(fixe *Fix) *Result {
f.Fixes = append(f.Fixes, fixe)
return f
}
// WithGraphTraversals - add a GraphTraversals to the Result
func (g *Result) WithGraphTraversals(graphTraversals []*GraphTraversal) *Result {
g.GraphTraversals = graphTraversals
return g
}
// AddGraphTraversal - add a single GraphTraversal to the Result
func (g *Result) AddGraphTraversal(graphTraversal *GraphTraversal) *Result {
g.GraphTraversals = append(g.GraphTraversals, graphTraversal)
return g
}
// WithGraphs - add a Graphs to the Result
func (g *Result) WithGraphs(graphs []*Graph) *Result {
g.Graphs = graphs
return g
}
// AddGraph - add a single Graph to the Result
func (g *Result) AddGraph(graph *Graph) *Result {
g.Graphs = append(g.Graphs, graph)
return g
}
// WithGuID - add a GuID to the Result
func (g *Result) WithGuID(guid string) *Result {
g.GuID = &guid
return g
}
// WithHostedViewerURI - add a HostedViewerURI to the Result
func (h *Result) WithHostedViewerURI(hostedViewerUri string) *Result {
h.HostedViewerURI = &hostedViewerUri
return h
}
// WithKind - add a Kind to the Result
func (k *Result) WithKind(kind string) *Result {
k.Kind = kind
return k
}
// WithLevel - add a Level to the Result
func (l *Result) WithLevel(level string) *Result {
l.Level = level
return l
}
// WithLocations - add a Locations to the Result
func (l *Result) WithLocations(locations []*Location) *Result {
l.Locations = locations
return l
}
// AddLocation - add a single Location to the Result
func (l *Result) AddLocation(location *Location) *Result {
l.Locations = append(l.Locations, location)
return l
}
// WithMessage - add a Message to the Result
func (m *Result) WithMessage(message *Message) *Result {
m.Message = message
return m
}
// WithOccurrenceCount - add a OccurrenceCount to the Result
func (o *Result) WithOccurrenceCount(occurrenceCount int) *Result {
o.OccurrenceCount = &occurrenceCount
return o
}
// WithProperties - add a Properties to the Result
func (p *Result) WithProperties(properties *PropertyBag) *Result {
p.Properties = properties
return p
}
// WithProvenance - add a Provenance to the Result
func (p *Result) WithProvenance(provenance *ResultProvenance) *Result {
p.Provenance = provenance
return p
}
// WithRank - add a Rank to the Result
func (r *Result) WithRank(rank float64) *Result {
r.Rank = rank
return r
}
// WithRelatedLocations - add a RelatedLocations to the Result
func (r *Result) WithRelatedLocations(relatedLocations []*Location) *Result {
r.RelatedLocations = relatedLocations
return r
}
// AddRelatedLocation - add a single RelatedLocation to the Result
func (r *Result) AddRelatedLocation(relatedLocation *Location) *Result {
r.RelatedLocations = append(r.RelatedLocations, relatedLocation)
return r
}
// WithRule - add a Rule to the Result
func (r *Result) WithRule(rule *ReportingDescriptorReference) *Result {
r.Rule = rule
return r
}
// WithRuleID - add a RuleID to the Result
func (r *Result) WithRuleID(ruleId string) *Result {
r.RuleID = &ruleId
return r
}
// WithRuleIndex - add a RuleIndex to the Result
func (r *Result) WithRuleIndex(ruleIndex int) *Result {
r.RuleIndex = ruleIndex
return r
}
// WithStacks - add a Stacks to the Result
func (s *Result) WithStacks(stacks []*Stack) *Result {
s.Stacks = stacks
return s
}
// AddStack - add a single Stack to the Result
func (s *Result) AddStack(stack *Stack) *Result {
s.Stacks = append(s.Stacks, stack)
return s
}
// WithSuppressions - add a Suppressions to the Result
func (s *Result) WithSuppressions(suppressions []*Suppression) *Result {
s.Suppressions = suppressions
return s
}
// AddSuppression - add a single Suppression to the Result
func (s *Result) AddSuppression(suppression *Suppression) *Result {
s.Suppressions = append(s.Suppressions, suppression)
return s
}
// WithTaxa - add a Taxa to the Result
func (t *Result) WithTaxa(taxa []*ReportingDescriptorReference) *Result {
t.Taxa = taxa
return t
}
// AddTaxa - add a single Taxa to the Result
func (t *Result) AddTaxa(taxa *ReportingDescriptorReference) *Result {
t.Taxa = append(t.Taxa, taxa)
return t
}
// WithWebRequest - add a WebRequest to the Result
func (w *Result) WithWebRequest(webRequest *WebRequest) *Result {
w.WebRequest = webRequest
return w
}
// WithWebResponse - add a WebResponse to the Result
func (w *Result) WithWebResponse(webResponse *WebResponse) *Result {
w.WebResponse = webResponse
return w
}
// WithWorkItemUris - add a WorkItemUris to the Result
func (w *Result) WithWorkItemUris(workItemUris []string) *Result {
w.WorkItemUris = workItemUris
return w
}
// AddWorkItemUri - add a single WorkItemUri to the Result
func (w *Result) AddWorkItemUri(workItemUri string) *Result {
w.WorkItemUris = append(w.WorkItemUris, workItemUri)
return w
}
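
A minimal sketch of assembling a Result with the setters above; the rule ID, index and severity are placeholders, and a real result would normally also carry a Message and Locations via the corresponding builders:

```go
package sarif

// exampleResult is illustrative only; all values are placeholders.
func exampleResult() *Result {
	return NewResult().
		WithRuleID("KC0001").
		WithRuleIndex(0).
		WithKind("fail").
		WithLevel("error").
		WithRank(80.0)
}
```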

View file

@ -0,0 +1,6 @@
package sarif
// NewRuleResult - creates a new result with the ruleID set
func NewRuleResult(ruleID string) *Result {
return NewResult().WithRuleID(ruleID)
}

View file

@ -0,0 +1,81 @@
package sarif
// ResultProvenance - Contains information about how and when a result was detected.
type ResultProvenance struct {
// An array of physicalLocation objects which specify the portions of an analysis tool's output that a converter transformed into the result.
ConversionSources []*PhysicalLocation `json:"conversionSources"`
// A GUID-valued string equal to the automationDetails.guid property of the run in which the result was first detected.
FirstDetectionRunGuID *string `json:"firstDetectionRunGuid,omitempty"`
// The Coordinated Universal Time (UTC) date and time at which the result was first detected. See "Date/time properties" in the SARIF spec for the required format.
FirstDetectionTimeUtc *string `json:"firstDetectionTimeUtc,omitempty"`
// The index within the run.invocations array of the invocation object which describes the tool invocation that detected the result.
InvocationIndex int `json:"invocationIndex"`
// A GUID-valued string equal to the automationDetails.guid property of the run in which the result was most recently detected.
LastDetectionRunGuID *string `json:"lastDetectionRunGuid,omitempty"`
// The Coordinated Universal Time (UTC) date and time at which the result was most recently detected. See "Date/time properties" in the SARIF spec for the required format.
LastDetectionTimeUtc *string `json:"lastDetectionTimeUtc,omitempty"`
// Key/value pairs that provide additional information about the result.
Properties *PropertyBag `json:"properties,omitempty"`
}
// NewResultProvenance - creates a new ResultProvenance
func NewResultProvenance() *ResultProvenance {
return &ResultProvenance{
ConversionSources: make([]*PhysicalLocation, 0),
InvocationIndex: -1,
}
}
// WithConversionSources - add a ConversionSources to the ResultProvenance
func (c *ResultProvenance) WithConversionSources(conversionSources []*PhysicalLocation) *ResultProvenance {
c.ConversionSources = conversionSources
return c
}
// AddConversionSource - add a single ConversionSource to the ResultProvenance
func (c *ResultProvenance) AddConversionSource(conversionSource *PhysicalLocation) *ResultProvenance {
c.ConversionSources = append(c.ConversionSources, conversionSource)
return c
}
// WithFirstDetectionRunGuID - add a FirstDetectionRunGuID to the ResultProvenance
func (f *ResultProvenance) WithFirstDetectionRunGuID(firstDetectionRunGuid string) *ResultProvenance {
f.FirstDetectionRunGuID = &firstDetectionRunGuid
return f
}
// WithFirstDetectionTimeUtc - add a FirstDetectionTimeUtc to the ResultProvenance
func (f *ResultProvenance) WithFirstDetectionTimeUtc(firstDetectionTimeUtc string) *ResultProvenance {
f.FirstDetectionTimeUtc = &firstDetectionTimeUtc
return f
}
// WithInvocationIndex - add a InvocationIndex to the ResultProvenance
func (i *ResultProvenance) WithInvocationIndex(invocationIndex int) *ResultProvenance {
i.InvocationIndex = invocationIndex
return i
}
// WithLastDetectionRunGuID - add a LastDetectionRunGuID to the ResultProvenance
func (l *ResultProvenance) WithLastDetectionRunGuID(lastDetectionRunGuid string) *ResultProvenance {
l.LastDetectionRunGuID = &lastDetectionRunGuid
return l
}
// WithLastDetectionTimeUtc - add a LastDetectionTimeUtc to the ResultProvenance
func (l *ResultProvenance) WithLastDetectionTimeUtc(lastDetectionTimeUtc string) *ResultProvenance {
l.LastDetectionTimeUtc = &lastDetectionTimeUtc
return l
}
// WithProperties - add a Properties to the ResultProvenance
func (p *ResultProvenance) WithProperties(properties *PropertyBag) *ResultProvenance {
p.Properties = properties
return p
}
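
A small, hedged sketch of the provenance builder; the timestamp is a placeholder in the RFC 3339 UTC form the SARIF spec expects for date/time properties:

```go
package sarif

// exampleProvenance is illustrative only.
func exampleProvenance() *ResultProvenance {
	return NewResultProvenance().
		WithFirstDetectionTimeUtc("2025-11-04T09:24:45Z").
		WithInvocationIndex(0)
}
```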

View file

@ -0,0 +1,381 @@
package sarif
// Run - Describes a single run of an analysis tool, and contains the reported output of that run.
type Run struct {
// The artifact location specified by each uriBaseId symbol on the machine where the tool originally ran.
OriginalUriBaseIds map[string]ArtifactLocation `json:"originalUriBaseIds,omitempty"`
// Addresses associated with this run instance, if any.
Addresses []*Address `json:"addresses"`
// An array of artifact objects relevant to the run.
Artifacts []*Artifact `json:"artifacts,omitempty"`
// Automation details that describe this run.
AutomationDetails *RunAutomationDetails `json:"automationDetails,omitempty"`
// The 'guid' property of a previous SARIF 'run' that comprises the baseline that was used to compute result 'baselineState' properties for the run.
BaselineGuID *string `json:"baselineGuid,omitempty"`
// Specifies the unit in which the tool measures columns.
ColumnKind *string `json:"columnKind,omitempty"`
// A conversion object that describes how a converter transformed an analysis tool's native reporting format into the SARIF format.
Conversion *Conversion `json:"conversion,omitempty"`
// Specifies the default encoding for any artifact object that refers to a text file.
DefaultEncoding *string `json:"defaultEncoding,omitempty"`
// Specifies the default source language for any artifact object that refers to a text file that contains source code.
DefaultSourceLanguage *string `json:"defaultSourceLanguage,omitempty"`
// References to external property files that should be inlined with the content of a root log file.
ExternalPropertyFileReferences *ExternalPropertyFileReferences `json:"externalPropertyFileReferences,omitempty"`
// An array of zero or more unique graph objects associated with the run.
Graphs []*Graph `json:"graphs"`
// Describes the invocation of the analysis tool.
Invocations []*Invocation `json:"invocations"`
// The language of the messages emitted into the log file during this run (expressed as an ISO 639-1 two-letter lowercase culture code) and an optional region (expressed as an ISO 3166-1 two-letter uppercase subculture code associated with a country or region). The casing is recommended but not required (in order for this data to conform to RFC5646).
Language string `json:"language"`
// An array of logical locations such as namespaces, types or functions.
LogicalLocations []*LogicalLocation `json:"logicalLocations"`
// An ordered list of character sequences that were treated as line breaks when computing region information for the run.
NewlineSequences []string `json:"newlineSequences"`
// Contains configurations that may potentially override both reportingDescriptor.defaultConfiguration (the tool's default severities) and invocation.configurationOverrides (severities established at run-time from the command line).
Policies []*ToolComponent `json:"policies"`
// Key/value pairs that provide additional information about the run.
Properties *PropertyBag `json:"properties,omitempty"`
// An array of strings used to replace sensitive information in a redaction-aware property.
RedactionTokens []string `json:"redactionTokens"`
// The set of results contained in a SARIF log. The results array can be omitted when a run is solely exporting rules metadata. It must be present (but may be empty) if a log file represents an actual scan.
Results []*Result `json:"results"`
// Automation details that describe the aggregate of runs to which this run belongs.
RunAggregates []*RunAutomationDetails `json:"runAggregates"`
// A specialLocations object that defines locations of special significance to SARIF consumers.
SpecialLocations *SpecialLocations `json:"specialLocations,omitempty"`
// An array of toolComponent objects relevant to a taxonomy in which results are categorized.
Taxonomies []*ToolComponent `json:"taxonomies"`
// An array of threadFlowLocation objects cached at run level.
ThreadFlowLocations []*ThreadFlowLocation `json:"threadFlowLocations"`
// Information about the tool or tool pipeline that generated the results in this run. A run can only contain results produced by a single tool or tool pipeline. A run can aggregate results from multiple log files, as long as context around the tool run (tool command-line arguments and the like) is identical for all aggregated files.
Tool *Tool `json:"tool,omitempty"`
// The set of available translations of the localized data provided by the tool.
Translations []*ToolComponent `json:"translations"`
// Specifies the revision in version control of the artifacts that were scanned.
VersionControlProvenance []*VersionControlDetails `json:"versionControlProvenance"`
// An array of request objects cached at run level.
WebRequests []*WebRequest `json:"webRequests"`
// An array of response objects cached at run level.
WebResponses []*WebResponse `json:"webResponses"`
}
// NewRun - creates a new Run with default values
func NewRun() *Run {
return &Run{
Addresses: make([]*Address, 0),
Artifacts: make([]*Artifact, 0),
Graphs: make([]*Graph, 0),
Invocations: make([]*Invocation, 0),
Language: "en-US",
LogicalLocations: make([]*LogicalLocation, 0),
NewlineSequences: []string{"\r\n", "\n"},
Policies: make([]*ToolComponent, 0),
RedactionTokens: make([]string, 0),
Results: make([]*Result, 0),
RunAggregates: make([]*RunAutomationDetails, 0),
Taxonomies: make([]*ToolComponent, 0),
ThreadFlowLocations: make([]*ThreadFlowLocation, 0),
Translations: make([]*ToolComponent, 0),
VersionControlProvenance: make([]*VersionControlDetails, 0),
WebRequests: make([]*WebRequest, 0),
WebResponses: make([]*WebResponse, 0),
}
}
// AddOriginalUriBaseId - add a single OriginalUriBaseId to the Run
func (o *Run) AddOriginalUriBaseId(key string, originalUriBaseId ArtifactLocation) *Run {
if o.OriginalUriBaseIds == nil {
o.OriginalUriBaseIds = map[string]ArtifactLocation{} // lazily initialise: NewRun leaves this map nil
}
o.OriginalUriBaseIds[key] = originalUriBaseId
return o
}
// WithOriginalUriBaseIds - add a OriginalUriBaseIds to the Run
func (o *Run) WithOriginalUriBaseIds(originalUriBaseIds map[string]ArtifactLocation) *Run {
o.OriginalUriBaseIds = originalUriBaseIds
return o
}
// WithAddresses - add a Addresses to the Run
func (a *Run) WithAddresses(addresses []*Address) *Run {
a.Addresses = addresses
return a
}
// AddAddresse - add a single Addresse to the Run
func (a *Run) AddAddresse(addresse *Address) *Run {
a.Addresses = append(a.Addresses, addresse)
return a
}
// WithArtifacts - add a Artifacts to the Run
func (a *Run) WithArtifacts(artifacts []*Artifact) *Run {
a.Artifacts = artifacts
return a
}
// AddArtifact - add a single Artifact to the Run
func (a *Run) AddArtifact(artifact *Artifact) *Run {
a.Artifacts = append(a.Artifacts, artifact)
return a
}
// WithAutomationDetails - add a AutomationDetails to the Run
func (a *Run) WithAutomationDetails(automationDetails *RunAutomationDetails) *Run {
a.AutomationDetails = automationDetails
return a
}
// WithBaselineGuID - add a BaselineGuID to the Run
func (b *Run) WithBaselineGuID(baselineGuid string) *Run {
b.BaselineGuID = &baselineGuid
return b
}
// WithColumnKind - add a ColumnKind to the Run
func (c *Run) WithColumnKind(columnKind string) *Run {
c.ColumnKind = &columnKind
return c
}
// WithConversion - add a Conversion to the Run
func (c *Run) WithConversion(conversion *Conversion) *Run {
c.Conversion = conversion
return c
}
// WithDefaultEncoding - add a DefaultEncoding to the Run
func (d *Run) WithDefaultEncoding(defaultEncoding string) *Run {
d.DefaultEncoding = &defaultEncoding
return d
}
// WithDefaultSourceLanguage - add a DefaultSourceLanguage to the Run
func (d *Run) WithDefaultSourceLanguage(defaultSourceLanguage string) *Run {
d.DefaultSourceLanguage = &defaultSourceLanguage
return d
}
// WithExternalPropertyFileReferences - add a ExternalPropertyFileReferences to the Run
func (e *Run) WithExternalPropertyFileReferences(externalPropertyFileReferences *ExternalPropertyFileReferences) *Run {
e.ExternalPropertyFileReferences = externalPropertyFileReferences
return e
}
// WithGraphs - add a Graphs to the Run
func (g *Run) WithGraphs(graphs []*Graph) *Run {
g.Graphs = graphs
return g
}
// AddGraph - add a single Graph to the Run
func (g *Run) AddGraph(graph *Graph) *Run {
g.Graphs = append(g.Graphs, graph)
return g
}
// WithInvocations - add a Invocations to the Run
func (i *Run) WithInvocations(invocations []*Invocation) *Run {
i.Invocations = invocations
return i
}
// AddInvocation - add a single Invocation to the Run
func (i *Run) AddInvocation(invocation *Invocation) *Run {
i.Invocations = append(i.Invocations, invocation)
return i
}
// WithLanguage - add a Language to the Run
func (l *Run) WithLanguage(language string) *Run {
l.Language = language
return l
}
// WithLogicalLocations - add a LogicalLocations to the Run
func (l *Run) WithLogicalLocations(logicalLocations []*LogicalLocation) *Run {
l.LogicalLocations = logicalLocations
return l
}
// AddLogicalLocation - add a single LogicalLocation to the Run
func (l *Run) AddLogicalLocation(logicalLocation *LogicalLocation) *Run {
l.LogicalLocations = append(l.LogicalLocations, logicalLocation)
return l
}
// WithNewlineSequences - add a NewlineSequences to the Run
func (n *Run) WithNewlineSequences(newlineSequences []string) *Run {
n.NewlineSequences = newlineSequences
return n
}
// AddNewlineSequence - add a single NewlineSequence to the Run
func (n *Run) AddNewlineSequence(newlineSequence string) *Run {
n.NewlineSequences = append(n.NewlineSequences, newlineSequence)
return n
}
// WithPolicies - add a Policies to the Run
func (p *Run) WithPolicies(policies []*ToolComponent) *Run {
p.Policies = policies
return p
}
// AddPolicie - add a single Policie to the Run
func (p *Run) AddPolicie(policie *ToolComponent) *Run {
p.Policies = append(p.Policies, policie)
return p
}
// WithProperties - add a Properties to the Run
func (p *Run) WithProperties(properties *PropertyBag) *Run {
p.Properties = properties
return p
}
// WithRedactionTokens - add a RedactionTokens to the Run
func (r *Run) WithRedactionTokens(redactionTokens []string) *Run {
r.RedactionTokens = redactionTokens
return r
}
// AddRedactionToken - add a single RedactionToken to the Run
func (r *Run) AddRedactionToken(redactionToken string) *Run {
r.RedactionTokens = append(r.RedactionTokens, redactionToken)
return r
}
// WithResults - add a Results to the Run
func (r *Run) WithResults(results []*Result) *Run {
r.Results = results
return r
}
// AddResult - add a single Result to the Run
func (r *Run) AddResult(result *Result) *Run {
r.Results = append(r.Results, result)
return r
}
// WithRunAggregates - add a RunAggregates to the Run
func (r *Run) WithRunAggregates(runAggregates []*RunAutomationDetails) *Run {
r.RunAggregates = runAggregates
return r
}
// AddRunAggregate - add a single RunAggregate to the Run
func (r *Run) AddRunAggregate(runAggregate *RunAutomationDetails) *Run {
r.RunAggregates = append(r.RunAggregates, runAggregate)
return r
}
// WithSpecialLocations - add a SpecialLocations to the Run
func (s *Run) WithSpecialLocations(specialLocations *SpecialLocations) *Run {
s.SpecialLocations = specialLocations
return s
}
// WithTaxonomies - add a Taxonomies to the Run
func (t *Run) WithTaxonomies(taxonomies []*ToolComponent) *Run {
t.Taxonomies = taxonomies
return t
}
// AddTaxonomie - add a single Taxonomie to the Run
func (t *Run) AddTaxonomie(taxonomie *ToolComponent) *Run {
t.Taxonomies = append(t.Taxonomies, taxonomie)
return t
}
// WithThreadFlowLocations - add a ThreadFlowLocations to the Run
func (t *Run) WithThreadFlowLocations(threadFlowLocations []*ThreadFlowLocation) *Run {
t.ThreadFlowLocations = threadFlowLocations
return t
}
// AddThreadFlowLocation - add a single ThreadFlowLocation to the Run
func (t *Run) AddThreadFlowLocation(threadFlowLocation *ThreadFlowLocation) *Run {
t.ThreadFlowLocations = append(t.ThreadFlowLocations, threadFlowLocation)
return t
}
// WithTool - add a Tool to the Run
func (t *Run) WithTool(tool *Tool) *Run {
t.Tool = tool
return t
}
// WithTranslations - add a Translations to the Run
func (t *Run) WithTranslations(translations []*ToolComponent) *Run {
t.Translations = translations
return t
}
// AddTranslation - add a single Translation to the Run
func (t *Run) AddTranslation(translation *ToolComponent) *Run {
t.Translations = append(t.Translations, translation)
return t
}
// WithVersionControlProvenance - add a VersionControlProvenance to the Run
func (v *Run) WithVersionControlProvenance(versionControlProvenance []*VersionControlDetails) *Run {
v.VersionControlProvenance = versionControlProvenance
return v
}
// AddVersionControlProvenance - add a single VersionControlProvenance to the Run
func (v *Run) AddVersionControlProvenance(versionControlProvenance *VersionControlDetails) *Run {
v.VersionControlProvenance = append(v.VersionControlProvenance, versionControlProvenance)
return v
}
// WithWebRequests - add a WebRequests to the Run
func (w *Run) WithWebRequests(webRequests []*WebRequest) *Run {
w.WebRequests = webRequests
return w
}
// AddWebRequest - add a single WebRequest to the Run
func (w *Run) AddWebRequest(webRequest *WebRequest) *Run {
w.WebRequests = append(w.WebRequests, webRequest)
return w
}
// WithWebResponses - add a WebResponses to the Run
func (w *Run) WithWebResponses(webResponses []*WebResponse) *Run {
w.WebResponses = webResponses
return w
}
// AddWebResponse - add a single WebResponse to the Run
func (w *Run) AddWebResponse(webResponse *WebResponse) *Run {
w.WebResponses = append(w.WebResponses, webResponse)
return w
}
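
A minimal sketch of the Run-level setters above (tool wiring and results are covered by helpers added in later files); "utf16CodeUnits" is one of the two columnKind values the SARIF 2.1.0 spec allows, and the other values are placeholders:

```go
package sarif

// exampleRunMetadata is illustrative only.
func exampleRunMetadata() *Run {
	return NewRun().
		WithDefaultSourceLanguage("yaml").
		WithColumnKind("utf16CodeUnits").
		AddNewlineSequence("\r")
}
```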

View file

@ -0,0 +1,54 @@
package sarif
// RunAutomationDetails - Information that describes a run's identity and role within an engineering system process.
type RunAutomationDetails struct {
// A stable, unique identifier for the equivalence class of runs to which this object's containing run object belongs in the form of a GUID.
CorrelationGuID *string `json:"correlationGuid,omitempty"`
// A description of the identity and role played within the engineering system by this object's containing run object.
Description *Message `json:"description,omitempty"`
// A stable, unique identifier for this object's containing run object in the form of a GUID.
GuID *string `json:"guid,omitempty"`
// A hierarchical string that uniquely identifies this object's containing run object.
ID *string `json:"id,omitempty"`
// Key/value pairs that provide additional information about the run automation details.
Properties *PropertyBag `json:"properties,omitempty"`
}
// NewRunAutomationDetails - creates a new RunAutomationDetails
func NewRunAutomationDetails() *RunAutomationDetails {
return &RunAutomationDetails{}
}
// WithCorrelationGuID - add a CorrelationGuID to the RunAutomationDetails
func (c *RunAutomationDetails) WithCorrelationGuID(correlationGuid string) *RunAutomationDetails {
c.CorrelationGuID = &correlationGuid
return c
}
// WithDescription - add a Description to the RunAutomationDetails
func (d *RunAutomationDetails) WithDescription(description *Message) *RunAutomationDetails {
d.Description = description
return d
}
// WithGuID - add a GuID to the RunAutomationDetails
func (g *RunAutomationDetails) WithGuID(guid string) *RunAutomationDetails {
g.GuID = &guid
return g
}
// WithID - add a ID to the RunAutomationDetails
func (i *RunAutomationDetails) WithID(id string) *RunAutomationDetails {
i.ID = &id
return i
}
// WithProperties - add a Properties to the RunAutomationDetails
func (p *RunAutomationDetails) WithProperties(properties *PropertyBag) *RunAutomationDetails {
p.Properties = properties
return p
}
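
For illustration, a short sketch of the automation-details builder; the hierarchical ID is a made-up placeholder:

```go
package sarif

// exampleAutomationDetails is illustrative only.
func exampleAutomationDetails() *RunAutomationDetails {
	return NewRunAutomationDetails().
		WithID("kubeconform/nightly/2025-11-04")
}
```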

View file

@ -0,0 +1,67 @@
package sarif
import "github.com/owenrumney/go-sarif/v3/pkg/report/utils"
// AddRule returns an existing ReportingDescriptor for the ruleID or creates a new ReportingDescriptor and returns a pointer to it
func (run *Run) AddRule(ruleID string) *ReportingDescriptor {
for _, rule := range run.Tool.Driver.Rules {
if *rule.ID == ruleID {
return rule
}
}
rule := NewRule(ruleID)
run.Tool.Driver.Rules = append(run.Tool.Driver.Rules, rule)
return rule
}
// GetRuleIndex returns the index of the rule with the given ID, or -1 if it does not exist
func (run *Run) GetRuleIndex(ruleID string) int {
if run.Tool == nil || run.Tool.Driver == nil || run.Tool.Driver.Rules == nil || ruleID == "" {
return -1
}
for i, rule := range run.Tool.Driver.Rules {
if *rule.ID == ruleID {
return i
}
}
return -1
}
// AddDistinctArtifact will handle deduplication of simple artifact additions
func (run *Run) AddDistinctArtifact(uri string) *Artifact {
for _, artifact := range run.Artifacts {
if *artifact.Location.URI == uri {
return artifact
}
}
a := NewArtifact().WithLength(utils.DefaultLengthInt)
a.WithLocation(NewSimpleArtifactLocation(uri))
run.Artifacts = append(run.Artifacts, a)
return a
}
// CreateResultForRule creates a new Result that references the rule with the given ID, adds it to the run and returns a pointer to it
func (run *Run) CreateResultForRule(ruleID string) *Result {
ruleIndex := run.GetRuleIndex(ruleID)
result := NewRuleResult(ruleID).WithRuleIndex(ruleIndex)
run.AddResult(result)
return result
}
// DedupeArtifacts will remove any duplicate artifacts from the run
func (run *Run) DedupeArtifacts() error {
dupes := map[*Artifact]bool{}
deduped := []*Artifact{}
for _, a := range run.Artifacts {
if _, ok := dupes[a]; !ok {
dupes[a] = true
deduped = append(deduped, a)
}
}
run.Artifacts = deduped
return nil
}
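
Putting the helpers above together, a hedged sketch of a typical flow: register a rule, record the scanned artifact, and attach a result that references the rule by index. The tool URI, file path and rule ID are placeholders, and NewRunWithInformationURI is defined in report.go, also added in this change:

```go
package sarif

// exampleRunHelpers is illustrative only; all values are placeholders.
func exampleRunHelpers() *Run {
	run := NewRunWithInformationURI("kubeconform", "https://github.com/yannh/kubeconform")
	run.AddRule("KC0001").WithDescription("Manifest failed schema validation")
	run.AddDistinctArtifact("deploy/web.yaml")
	run.CreateResultForRule("KC0001").WithLevel("error")
	return run
}
```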

View file

@ -0,0 +1,135 @@
package sarif
import (
"encoding/json"
"errors"
"fmt"
"io"
"os"
"github.com/xeipuuv/gojsonschema"
)
type Report struct {
// The URI of the JSON schema corresponding to the version.
Schema string `json:"$schema"`
// The SARIF format version of this log file.
Version string `json:"version"`
// The set of runs contained in this log file.
Runs []*Run `json:"runs"`
// References to external property files that should be inlined with the content of a root log file.
InlineExternalProperties []*ExternalProperties `json:"inlineExternalProperties,omitempty"`
// Key/value pairs that provide additional information about the report.
Properties PropertyBag `json:"properties,omitempty"`
}
func NewReport() *Report {
return &Report{
Schema: "https://raw.githubusercontent.com/oasis-tcs/sarif-spec/main/sarif-2.1/schema/sarif-schema-2.1.0.json",
Version: "2.1.0",
Runs: []*Run{},
InlineExternalProperties: []*ExternalProperties{},
Properties: *NewPropertyBag(),
}
}
// AddRun adds a run to the report
func (r *Report) AddRun(run *Run) *Report {
r.Runs = append(r.Runs, run)
return r
}
// Validate validates the report against the SARIF schema
func (r *Report) Validate() error {
schemaLoader := gojsonschema.NewStringLoader(schema)
documentLoader := gojsonschema.NewGoLoader(r)
result, err := gojsonschema.Validate(schemaLoader, documentLoader)
if err != nil {
return err
}
var errors []string
if !result.Valid() {
for _, desc := range result.Errors() {
errors = append(errors, fmt.Sprintf("%s\n", desc.String()))
}
return fmt.Errorf("validation failed: %v", errors)
}
return nil
}
// NewRunWithInformationURI creates a new Run and returns a pointer to it
func NewRunWithInformationURI(toolName, informationURI string) *Run {
run := NewRun()
run.Tool = NewTool()
run.Tool.Driver = NewToolComponent()
run.Tool.Driver.Name = &toolName
run.Tool.Driver.InformationURI = &informationURI
return run
}
// Open loads a Report from a file
func Open(filename string) (*Report, error) {
if _, err := os.Stat(filename); err != nil && os.IsNotExist(err) {
return nil, errors.New("the provided file path doesn't have a file")
}
content, err := os.ReadFile(filename)
if err != nil {
return nil, fmt.Errorf("the provided filepath could not be opened. %w", err)
}
return FromBytes(content)
}
// FromString loads a Report from string content
func FromString(content string) (*Report, error) {
return FromBytes([]byte(content))
}
// FromBytes loads a Report from a byte array
func FromBytes(content []byte) (*Report, error) {
var report Report
if err := json.Unmarshal(content, &report); err != nil {
return nil, err
}
return &report, nil
}
// WriteFile will write the report to a file using a pretty formatter
func (sarif *Report) WriteFile(filename string) error {
file, err := os.OpenFile(filename, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644)
if err != nil {
return err
}
defer func() { _ = file.Close() }()
return sarif.PrettyWrite(file)
}
// Write writes the JSON as a string with no formatting
func (sarif *Report) Write(w io.Writer) error {
for _, run := range sarif.Runs {
run.DedupeArtifacts()
}
marshal, err := json.Marshal(sarif)
if err != nil {
return err
}
_, err = w.Write(marshal)
return err
}
// PrettyWrite writes the JSON output with indentation
func (sarif *Report) PrettyWrite(w io.Writer) error {
marshal, err := json.MarshalIndent(sarif, "", " ")
if err != nil {
return err
}
_, err = w.Write(marshal)
return err
}
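
End to end, a minimal sketch of how a report might be assembled and emitted with the API above; the tool name, URI and rule ID are placeholders, and the snippet writes to stdout rather than a file:

```go
package sarif

import "os"

// exampleReport is illustrative only.
func exampleReport() error {
	run := NewRunWithInformationURI("kubeconform", "https://github.com/yannh/kubeconform")
	run.AddRule("KC0001").WithDescription("Manifest failed schema validation")
	run.CreateResultForRule("KC0001").WithLevel("error")

	report := NewReport()
	report.AddRun(run)
	return report.PrettyWrite(os.Stdout)
}
```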

File diff suppressed because it is too large

View file

@ -0,0 +1,27 @@
package sarif
// SpecialLocations - Defines locations of special significance to SARIF consumers.
type SpecialLocations struct {
// Provides a suggestion to SARIF consumers to display file paths relative to the specified location.
DisplayBase *ArtifactLocation `json:"displayBase,omitempty"`
// Key/value pairs that provide additional information about the special locations.
Properties *PropertyBag `json:"properties,omitempty"`
}
// NewSpecialLocations - creates a new SpecialLocations
func NewSpecialLocations() *SpecialLocations {
return &SpecialLocations{}
}
// WithDisplayBase - add a DisplayBase to the SpecialLocations
func (d *SpecialLocations) WithDisplayBase(displayBase *ArtifactLocation) *SpecialLocations {
d.DisplayBase = displayBase
return d
}
// WithProperties - add a Properties to the SpecialLocations
func (p *SpecialLocations) WithProperties(properties *PropertyBag) *SpecialLocations {
p.Properties = properties
return p
}

View file

@ -0,0 +1,44 @@
package sarif
// Stack - A call stack that is relevant to a result.
type Stack struct {
// An array of stack frames that represents a sequence of calls, rendered in reverse chronological order, that comprise the call stack.
Frames []*StackFrame `json:"frames,omitempty"`
// A message relevant to this call stack.
Message *Message `json:"message,omitempty"`
// Key/value pairs that provide additional information about the stack.
Properties *PropertyBag `json:"properties,omitempty"`
}
// NewStack - creates a new Stack
func NewStack() *Stack {
return &Stack{
Frames: make([]*StackFrame, 0),
}
}
// WithFrames - add a Frames to the Stack
func (f *Stack) WithFrames(frames []*StackFrame) *Stack {
f.Frames = frames
return f
}
// AddFrame - add a single Frame to the Stack
func (f *Stack) AddFrame(frame *StackFrame) *Stack {
f.Frames = append(f.Frames, frame)
return f
}
// WithMessage - add a Message to the Stack
func (m *Stack) WithMessage(message *Message) *Stack {
m.Message = message
return m
}
// WithProperties - add a Properties to the Stack
func (p *Stack) WithProperties(properties *PropertyBag) *Stack {
p.Properties = properties
return p
}

View file

@ -0,0 +1,62 @@
package sarif
// StackFrame - A function call within a stack trace.
type StackFrame struct {
// The location to which this stack frame refers.
Location *Location `json:"location,omitempty"`
// The name of the module that contains the code of this stack frame.
Module *string `json:"module,omitempty"`
// The parameters of the call that is executing.
Parameters []string `json:"parameters"`
// Key/value pairs that provide additional information about the stack frame.
Properties *PropertyBag `json:"properties,omitempty"`
// The thread identifier of the stack frame.
ThreadID *int `json:"threadId,omitempty"`
}
// NewStackFrame - creates a new StackFrame
func NewStackFrame() *StackFrame {
return &StackFrame{
Parameters: make([]string, 0),
}
}
// WithLocation - add a Location to the StackFrame
func (l *StackFrame) WithLocation(location *Location) *StackFrame {
l.Location = location
return l
}
// WithModule - add a Module to the StackFrame
func (m *StackFrame) WithModule(module string) *StackFrame {
m.Module = &module
return m
}
// WithParameters - add a Parameters to the StackFrame
func (p *StackFrame) WithParameters(parameters []string) *StackFrame {
p.Parameters = parameters
return p
}
// AddParameter - add a single Parameter to the StackFrame
func (p *StackFrame) AddParameter(parameter string) *StackFrame {
p.Parameters = append(p.Parameters, parameter)
return p
}
// WithProperties - add a Properties to the StackFrame
func (p *StackFrame) WithProperties(properties *PropertyBag) *StackFrame {
p.Properties = properties
return p
}
// WithThreadID - add a ThreadID to the StackFrame
func (t *StackFrame) WithThreadID(threadId int) *StackFrame {
t.ThreadID = &threadId
return t
}
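
A short sketch combining the Stack and StackFrame builders; the module name and parameter are placeholders:

```go
package sarif

// exampleStack is illustrative only.
func exampleStack() *Stack {
	frame := NewStackFrame().
		WithModule("validator").
		AddParameter("deploy/web.yaml")
	return NewStack().AddFrame(frame)
}
```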

View file

@ -0,0 +1,63 @@
package sarif
// Suppression - A suppression that is relevant to a result.
type Suppression struct {
// A stable, unique identifier for the suppression in the form of a GUID.
GuID *string `json:"guid,omitempty"`
// A string representing the justification for the suppression.
Justification *string `json:"justification,omitempty"`
// A string that indicates where the suppression is persisted.
Kind *string `json:"kind,omitempty"`
// Identifies the location associated with the suppression.
Location *Location `json:"location,omitempty"`
// Key/value pairs that provide additional information about the suppression.
Properties *PropertyBag `json:"properties,omitempty"`
// A string that indicates the review status of the suppression.
Status *string `json:"status,omitempty"`
}
// NewSuppression - creates a new Suppression
func NewSuppression() *Suppression {
return &Suppression{}
}
// WithGuID - add a GuID to the Suppression
func (g *Suppression) WithGuID(guid string) *Suppression {
g.GuID = &guid
return g
}
// WithJustification - add a Justification to the Suppression
func (j *Suppression) WithJustification(justification string) *Suppression {
j.Justification = &justification
return j
}
// WithKind - add a Kind to the Suppression
func (k *Suppression) WithKind(kind string) *Suppression {
k.Kind = &kind
return k
}
// WithLocation - add a Location to the Suppression
func (l *Suppression) WithLocation(location *Location) *Suppression {
l.Location = location
return l
}
// WithProperties - add a Properties to the Suppression
func (p *Suppression) WithProperties(properties *PropertyBag) *Suppression {
p.Properties = properties
return p
}
// WithStatus - add a Status to the Suppression
func (s *Suppression) WithStatus(status string) *Suppression {
s.Status = &status
return s
}
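
A hedged sketch of the suppression builder; "external" and "accepted" are among the kind/status values defined by the SARIF 2.1.0 spec, and the justification text is a placeholder:

```go
package sarif

// exampleSuppression is illustrative only.
func exampleSuppression() *Suppression {
	return NewSuppression().
		WithKind("external").
		WithStatus("accepted").
		WithJustification("Accepted deviation for this CRD")
}
```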

View file

@ -0,0 +1,83 @@
package sarif
// ThreadFlow - Describes a sequence of code locations that specify a path through a single thread of execution, such as an operating system thread or fiber.
type ThreadFlow struct {
// Values of relevant expressions at the start of the thread flow that may change during thread flow execution.
InitialState map[string]MultiformatMessageString `json:"initialState,omitempty"`
// Values of relevant expressions at the start of the thread flow that remain constant.
ImmutableState map[string]MultiformatMessageString `json:"immutableState,omitempty"`
// A string that uniquely identifies the threadFlow within the codeFlow in which it occurs.
ID *string `json:"id,omitempty"`
// A temporally ordered array of 'threadFlowLocation' objects, each of which describes a location visited by the tool while producing the result.
Locations []*ThreadFlowLocation `json:"locations,omitempty"`
// A message relevant to the thread flow.
Message *Message `json:"message,omitempty"`
// Key/value pairs that provide additional information about the thread flow.
Properties *PropertyBag `json:"properties,omitempty"`
}
// NewThreadFlow - creates a new ThreadFlow
func NewThreadFlow() *ThreadFlow {
return &ThreadFlow{
Locations: make([]*ThreadFlowLocation, 0),
}
}
// AddInitialState - add a single InitialState to the ThreadFlow
func (i *ThreadFlow) AddInitialState(key string, initialState MultiformatMessageString) *ThreadFlow {
if i.InitialState == nil {
i.InitialState = map[string]MultiformatMessageString{} // lazily initialise: NewThreadFlow leaves this map nil
}
i.InitialState[key] = initialState
return i
}
// WithInitialState - add a InitialState to the ThreadFlow
func (i *ThreadFlow) WithInitialState(initialState map[string]MultiformatMessageString) *ThreadFlow {
i.InitialState = initialState
return i
}
// AddImmutableState - add a single ImmutableState to the ThreadFlow
func (i *ThreadFlow) AddImmutableState(key string, immutableState MultiformatMessageString) *ThreadFlow {
if i.ImmutableState == nil {
i.ImmutableState = map[string]MultiformatMessageString{} // lazily initialise: NewThreadFlow leaves this map nil
}
i.ImmutableState[key] = immutableState
return i
}
// WithImmutableState - add a ImmutableState to the ThreadFlow
func (i *ThreadFlow) WithImmutableState(immutableState map[string]MultiformatMessageString) *ThreadFlow {
i.ImmutableState = immutableState
return i
}
// WithID - add a ID to the ThreadFlow
func (i *ThreadFlow) WithID(id string) *ThreadFlow {
i.ID = &id
return i
}
// WithLocations - add a Locations to the ThreadFlow
func (l *ThreadFlow) WithLocations(locations []*ThreadFlowLocation) *ThreadFlow {
l.Locations = locations
return l
}
// AddLocation - add a single Location to the ThreadFlow
func (l *ThreadFlow) AddLocation(location *ThreadFlowLocation) *ThreadFlow {
l.Locations = append(l.Locations, location)
return l
}
// WithMessage - add a Message to the ThreadFlow
func (m *ThreadFlow) WithMessage(message *Message) *ThreadFlow {
m.Message = message
return m
}
// WithProperties - add a Properties to the ThreadFlow
func (p *ThreadFlow) WithProperties(properties *PropertyBag) *ThreadFlow {
p.Properties = properties
return p
}

View file

@ -0,0 +1,159 @@
package sarif
// ThreadFlowLocation - A location visited by an analysis tool while simulating or monitoring the execution of a program.
type ThreadFlowLocation struct {
// A dictionary, each of whose keys specifies a variable or expression, the associated value of which represents the variable or expression value. For an annotation of kind 'continuation', for example, this dictionary might hold the current assumed values of a set of global variables.
State map[string]MultiformatMessageString `json:"state,omitempty"`
// An integer representing the temporal order in which execution reached this location.
ExecutionOrder int `json:"executionOrder"`
// The Coordinated Universal Time (UTC) date and time at which this location was executed.
ExecutionTimeUtc *string `json:"executionTimeUtc,omitempty"`
// Specifies the importance of this location in understanding the code flow in which it occurs. The order from most to least important is "essential", "important", "unimportant". Default: "important".
Importance string `json:"importance"`
// The index within the run threadFlowLocations array.
Index int `json:"index"`
// A set of distinct strings that categorize the thread flow location. Well-known kinds include 'acquire', 'release', 'enter', 'exit', 'call', 'return', 'branch', 'implicit', 'false', 'true', 'caution', 'danger', 'unknown', 'unreachable', 'taint', 'function', 'handler', 'lock', 'memory', 'resource', 'scope' and 'value'.
Kinds []string `json:"kinds"`
// The code location.
Location *Location `json:"location,omitempty"`
// The name of the module that contains the code that is executing.
Module *string `json:"module,omitempty"`
// An integer representing a containment hierarchy within the thread flow.
NestingLevel *int `json:"nestingLevel,omitempty"`
// Key/value pairs that provide additional information about the threadflow location.
Properties *PropertyBag `json:"properties,omitempty"`
// The call stack leading to this location.
Stack *Stack `json:"stack,omitempty"`
// An array of references to rule or taxonomy reporting descriptors that are applicable to the thread flow location.
Taxa []*ReportingDescriptorReference `json:"taxa"`
// A web request associated with this thread flow location.
WebRequest *WebRequest `json:"webRequest,omitempty"`
// A web response associated with this thread flow location.
WebResponse *WebResponse `json:"webResponse,omitempty"`
}
// NewThreadFlowLocation - creates a new ThreadFlowLocation
func NewThreadFlowLocation() *ThreadFlowLocation {
return &ThreadFlowLocation{
ExecutionOrder: -1,
Importance: "important",
Index: -1,
Kinds: make([]string, 0),
Taxa: make([]*ReportingDescriptorReference, 0),
}
}
// AddState - add a single State to the ThreadFlowLocation
func (s *ThreadFlowLocation) AddState(key string, state MultiformatMessageString) *ThreadFlowLocation {
if s.State == nil {
	s.State = make(map[string]MultiformatMessageString) // initialize on first use so Add does not panic on a nil map
}
s.State[key] = state
return s
}
// WithState - add a State to the ThreadFlowLocation
func (s *ThreadFlowLocation) WithState(state map[string]MultiformatMessageString) *ThreadFlowLocation {
s.State = state
return s
}
// WithExecutionOrder - add an ExecutionOrder to the ThreadFlowLocation
func (e *ThreadFlowLocation) WithExecutionOrder(executionOrder int) *ThreadFlowLocation {
e.ExecutionOrder = executionOrder
return e
}
// WithExecutionTimeUtc - add an ExecutionTimeUtc to the ThreadFlowLocation
func (e *ThreadFlowLocation) WithExecutionTimeUtc(executionTimeUtc string) *ThreadFlowLocation {
e.ExecutionTimeUtc = &executionTimeUtc
return e
}
// WithImportance - add an Importance to the ThreadFlowLocation
func (i *ThreadFlowLocation) WithImportance(importance string) *ThreadFlowLocation {
i.Importance = importance
return i
}
// WithIndex - add an Index to the ThreadFlowLocation
func (i *ThreadFlowLocation) WithIndex(index int) *ThreadFlowLocation {
i.Index = index
return i
}
// WithKinds - add Kinds to the ThreadFlowLocation
func (k *ThreadFlowLocation) WithKinds(kinds []string) *ThreadFlowLocation {
k.Kinds = kinds
return k
}
// AddKind - add a single Kind to the ThreadFlowLocation
func (k *ThreadFlowLocation) AddKind(kind string) *ThreadFlowLocation {
k.Kinds = append(k.Kinds, kind)
return k
}
// WithLocation - add a Location to the ThreadFlowLocation
func (l *ThreadFlowLocation) WithLocation(location *Location) *ThreadFlowLocation {
l.Location = location
return l
}
// WithModule - add a Module to the ThreadFlowLocation
func (m *ThreadFlowLocation) WithModule(module string) *ThreadFlowLocation {
m.Module = &module
return m
}
// WithNestingLevel - add a NestingLevel to the ThreadFlowLocation
func (n *ThreadFlowLocation) WithNestingLevel(nestingLevel int) *ThreadFlowLocation {
n.NestingLevel = &nestingLevel
return n
}
// WithProperties - add Properties to the ThreadFlowLocation
func (p *ThreadFlowLocation) WithProperties(properties *PropertyBag) *ThreadFlowLocation {
p.Properties = properties
return p
}
// WithStack - add a Stack to the ThreadFlowLocation
func (s *ThreadFlowLocation) WithStack(stack *Stack) *ThreadFlowLocation {
s.Stack = stack
return s
}
// WithTaxa - add Taxa to the ThreadFlowLocation
func (t *ThreadFlowLocation) WithTaxa(taxa []*ReportingDescriptorReference) *ThreadFlowLocation {
t.Taxa = taxa
return t
}
// AddTaxa - add a single Taxa entry to the ThreadFlowLocation
func (t *ThreadFlowLocation) AddTaxa(taxa *ReportingDescriptorReference) *ThreadFlowLocation {
t.Taxa = append(t.Taxa, taxa)
return t
}
// WithWebRequest - add a WebRequest to the ThreadFlowLocation
func (w *ThreadFlowLocation) WithWebRequest(webRequest *WebRequest) *ThreadFlowLocation {
w.WebRequest = webRequest
return w
}
// WithWebResponse - add a WebResponse to the ThreadFlowLocation
func (w *ThreadFlowLocation) WithWebResponse(webResponse *WebResponse) *ThreadFlowLocation {
w.WebResponse = webResponse
return w
}
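
// Editor's note: illustrative sketch only, not part of the generated file; the kind
// and module strings are placeholder values.
func sketchThreadFlowLocation() *ThreadFlowLocation {
	return NewThreadFlowLocation().
		WithExecutionOrder(1).
		WithImportance("essential").
		AddKind("call").
		WithModule("example-module")
}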

View file

@ -0,0 +1,44 @@
package sarif
// Tool - The analysis tool that was run.
type Tool struct {
// The analysis tool that was run.
Driver *ToolComponent `json:"driver,omitempty"`
// Tool extensions that contributed to or reconfigured the analysis tool that was run.
Extensions []*ToolComponent `json:"extensions"`
// Key/value pairs that provide additional information about the tool.
Properties *PropertyBag `json:"properties,omitempty"`
}
// NewTool - creates a new Tool
func NewTool() *Tool {
return &Tool{
Extensions: make([]*ToolComponent, 0),
}
}
// WithDriver - add a Driver to the Tool
func (d *Tool) WithDriver(driver *ToolComponent) *Tool {
d.Driver = driver
return d
}
// WithExtensions - add Extensions to the Tool
func (e *Tool) WithExtensions(extensions []*ToolComponent) *Tool {
e.Extensions = extensions
return e
}
// AddExtension - add a single Extension to the Tool
func (e *Tool) AddExtension(extension *ToolComponent) *Tool {
e.Extensions = append(e.Extensions, extension)
return e
}
// WithProperties - add Properties to the Tool
func (p *Tool) WithProperties(properties *PropertyBag) *Tool {
p.Properties = properties
return p
}
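
// Editor's note: illustrative sketch only, not part of the generated file; the tool
// name and version are placeholder values. It shows how a Tool is composed from a
// ToolComponent driver using the builders above.
func sketchTool() *Tool {
	driver := NewToolComponent().
		WithName("example-tool").
		WithVersion("0.0.1")
	return NewTool().WithDriver(driver)
}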

View file

@ -0,0 +1,312 @@
package sarif
// ToolComponent - A component, such as a plug-in or the driver, of the analysis tool that was run.
type ToolComponent struct {
// A dictionary, each of whose keys is a resource identifier and each of whose values is a multiformatMessageString object, which holds message strings in plain text and (optionally) Markdown format. The strings can include placeholders, which can be used to construct a message in combination with an arbitrary number of additional string arguments.
GlobalMessageStrings map[string]MultiformatMessageString `json:"globalMessageStrings,omitempty"`
// The component which is strongly associated with this component. For a translation, this refers to the component which has been translated. For an extension, this is the driver that provides the extension's plugin model.
AssociatedComponent *ToolComponentReference `json:"associatedComponent,omitempty"`
// The kinds of data contained in this object.
Contents []string `json:"contents"`
// The binary version of the tool component's primary executable file expressed as four non-negative integers separated by a period (for operating systems that express file versions in this way).
DottedQuadFileVersion *string `json:"dottedQuadFileVersion,omitempty"`
// The absolute URI from which the tool component can be downloaded.
DownloadURI *string `json:"downloadUri,omitempty"`
// A comprehensive description of the tool component.
FullDescription *MultiformatMessageString `json:"fullDescription,omitempty"`
// The name of the tool component along with its version and any other useful identifying information, such as its locale.
FullName *string `json:"fullName,omitempty"`
// A unique identifier for the tool component in the form of a GUID.
GuID *string `json:"guid,omitempty"`
// The absolute URI at which information about this version of the tool component can be found.
InformationURI *string `json:"informationUri,omitempty"`
// Specifies whether this object contains a complete definition of the localizable and/or non-localizable data for this component, as opposed to including only data that is relevant to the results persisted to this log file.
IsComprehensive bool `json:"isComprehensive"`
// The language of the messages emitted into the log file during this run (expressed as an ISO 639-1 two-letter lowercase language code) and an optional region (expressed as an ISO 3166-1 two-letter uppercase subculture code associated with a country or region). The casing is recommended but not required (in order for this data to conform to RFC5646).
Language string `json:"language"`
// The semantic version of the localized strings defined in this component; maintained by components that provide translations.
LocalizedDataSemanticVersion *string `json:"localizedDataSemanticVersion,omitempty"`
// An array of the artifactLocation objects associated with the tool component.
Locations []*ArtifactLocation `json:"locations"`
// The minimum value of localizedDataSemanticVersion required in translations consumed by this component; used by components that consume translations.
MinimumRequiredLocalizedDataSemanticVersion *string `json:"minimumRequiredLocalizedDataSemanticVersion,omitempty"`
// The name of the tool component.
Name *string `json:"name,omitempty"`
// An array of reportingDescriptor objects relevant to the notifications related to the configuration and runtime execution of the tool component.
Notifications []*ReportingDescriptor `json:"notifications"`
// The organization or company that produced the tool component.
Organization *string `json:"organization,omitempty"`
// A product suite to which the tool component belongs.
Product *string `json:"product,omitempty"`
// A localizable string containing the name of the suite of products to which the tool component belongs.
ProductSuite *string `json:"productSuite,omitempty"`
// Key/value pairs that provide additional information about the tool component.
Properties *PropertyBag `json:"properties,omitempty"`
// A string specifying the UTC date (and optionally, the time) of the component's release.
ReleaseDateUtc *string `json:"releaseDateUtc,omitempty"`
// An array of reportingDescriptor objects relevant to the analysis performed by the tool component.
Rules []*ReportingDescriptor `json:"rules"`
// The tool component version in the format specified by Semantic Versioning 2.0.
SemanticVersion *string `json:"semanticVersion,omitempty"`
// A brief description of the tool component.
ShortDescription *MultiformatMessageString `json:"shortDescription,omitempty"`
// An array of toolComponentReference objects to declare the taxonomies supported by the tool component.
SupportedTaxonomies []*ToolComponentReference `json:"supportedTaxonomies"`
// An array of reportingDescriptor objects relevant to the definitions of both standalone and tool-defined taxonomies.
Taxa []*ReportingDescriptor `json:"taxa"`
// Translation metadata, required for a translation, not populated by other component types.
TranslationMetadata *TranslationMetadata `json:"translationMetadata,omitempty"`
// The tool component version, in whatever format the component natively provides.
Version *string `json:"version,omitempty"`
}
// NewToolComponent - creates a new ToolComponent
func NewToolComponent() *ToolComponent {
return &ToolComponent{
Contents: []string{"localizedData", "nonLocalizedData"},
IsComprehensive: false,
Language: "en-US",
Locations: make([]*ArtifactLocation, 0),
Notifications: make([]*ReportingDescriptor, 0),
Rules: make([]*ReportingDescriptor, 0),
SupportedTaxonomies: make([]*ToolComponentReference, 0),
Taxa: make([]*ReportingDescriptor, 0),
}
}
// AddGlobalMessageString - add a single GlobalMessageString to the ToolComponent
func (g *ToolComponent) AddGlobalMessageString(key string, globalMessageString MultiformatMessageString) *ToolComponent {
if g.GlobalMessageStrings == nil {
	g.GlobalMessageStrings = make(map[string]MultiformatMessageString) // initialize on first use so Add does not panic on a nil map
}
g.GlobalMessageStrings[key] = globalMessageString
return g
}
// WithGlobalMessageStrings - add GlobalMessageStrings to the ToolComponent
func (g *ToolComponent) WithGlobalMessageStrings(globalMessageStrings map[string]MultiformatMessageString) *ToolComponent {
g.GlobalMessageStrings = globalMessageStrings
return g
}
// WithAssociatedComponent - add an AssociatedComponent to the ToolComponent
func (a *ToolComponent) WithAssociatedComponent(associatedComponent *ToolComponentReference) *ToolComponent {
a.AssociatedComponent = associatedComponent
return a
}
// WithContents - add Contents to the ToolComponent
func (c *ToolComponent) WithContents(contents []string) *ToolComponent {
c.Contents = contents
return c
}
// AddContent - add a single Content to the ToolComponent
func (c *ToolComponent) AddContent(content string) *ToolComponent {
c.Contents = append(c.Contents, content)
return c
}
// WithDottedQuadFileVersion - add a DottedQuadFileVersion to the ToolComponent
func (d *ToolComponent) WithDottedQuadFileVersion(dottedQuadFileVersion string) *ToolComponent {
d.DottedQuadFileVersion = &dottedQuadFileVersion
return d
}
// WithDownloadURI - add a DownloadURI to the ToolComponent
func (d *ToolComponent) WithDownloadURI(downloadUri string) *ToolComponent {
d.DownloadURI = &downloadUri
return d
}
// WithFullDescription - add a FullDescription to the ToolComponent
func (f *ToolComponent) WithFullDescription(fullDescription *MultiformatMessageString) *ToolComponent {
f.FullDescription = fullDescription
return f
}
// WithFullName - add a FullName to the ToolComponent
func (f *ToolComponent) WithFullName(fullName string) *ToolComponent {
f.FullName = &fullName
return f
}
// WithGuID - add a GuID to the ToolComponent
func (g *ToolComponent) WithGuID(guid string) *ToolComponent {
g.GuID = &guid
return g
}
// WithInformationURI - add an InformationURI to the ToolComponent
func (i *ToolComponent) WithInformationURI(informationUri string) *ToolComponent {
i.InformationURI = &informationUri
return i
}
// WithIsComprehensive - set IsComprehensive on the ToolComponent
func (i *ToolComponent) WithIsComprehensive(isComprehensive bool) *ToolComponent {
i.IsComprehensive = isComprehensive
return i
}
// WithLanguage - add a Language to the ToolComponent
func (l *ToolComponent) WithLanguage(language string) *ToolComponent {
l.Language = language
return l
}
// WithLocalizedDataSemanticVersion - add a LocalizedDataSemanticVersion to the ToolComponent
func (l *ToolComponent) WithLocalizedDataSemanticVersion(localizedDataSemanticVersion string) *ToolComponent {
l.LocalizedDataSemanticVersion = &localizedDataSemanticVersion
return l
}
// WithLocations - add Locations to the ToolComponent
func (l *ToolComponent) WithLocations(locations []*ArtifactLocation) *ToolComponent {
l.Locations = locations
return l
}
// AddLocation - add a single Location to the ToolComponent
func (l *ToolComponent) AddLocation(location *ArtifactLocation) *ToolComponent {
l.Locations = append(l.Locations, location)
return l
}
// WithMinimumRequiredLocalizedDataSemanticVersion - add a MinimumRequiredLocalizedDataSemanticVersion to the ToolComponent
func (m *ToolComponent) WithMinimumRequiredLocalizedDataSemanticVersion(minimumRequiredLocalizedDataSemanticVersion string) *ToolComponent {
m.MinimumRequiredLocalizedDataSemanticVersion = &minimumRequiredLocalizedDataSemanticVersion
return m
}
// WithName - add a Name to the ToolComponent
func (n *ToolComponent) WithName(name string) *ToolComponent {
n.Name = &name
return n
}
// WithNotifications - add Notifications to the ToolComponent
func (n *ToolComponent) WithNotifications(notifications []*ReportingDescriptor) *ToolComponent {
n.Notifications = notifications
return n
}
// AddNotification - add a single Notification to the ToolComponent
func (n *ToolComponent) AddNotification(notification *ReportingDescriptor) *ToolComponent {
n.Notifications = append(n.Notifications, notification)
return n
}
// WithOrganization - add an Organization to the ToolComponent
func (o *ToolComponent) WithOrganization(organization string) *ToolComponent {
o.Organization = &organization
return o
}
// WithProduct - add a Product to the ToolComponent
func (p *ToolComponent) WithProduct(product string) *ToolComponent {
p.Product = &product
return p
}
// WithProductSuite - add a ProductSuite to the ToolComponent
func (p *ToolComponent) WithProductSuite(productSuite string) *ToolComponent {
p.ProductSuite = &productSuite
return p
}
// WithProperties - add Properties to the ToolComponent
func (p *ToolComponent) WithProperties(properties *PropertyBag) *ToolComponent {
p.Properties = properties
return p
}
// WithReleaseDateUtc - add a ReleaseDateUtc to the ToolComponent
func (r *ToolComponent) WithReleaseDateUtc(releaseDateUtc string) *ToolComponent {
r.ReleaseDateUtc = &releaseDateUtc
return r
}
// WithRules - add Rules to the ToolComponent
func (r *ToolComponent) WithRules(rules []*ReportingDescriptor) *ToolComponent {
r.Rules = rules
return r
}
// AddRule - add a single Rule to the ToolComponent
func (r *ToolComponent) AddRule(rule *ReportingDescriptor) *ToolComponent {
r.Rules = append(r.Rules, rule)
return r
}
// WithSemanticVersion - add a SemanticVersion to the ToolComponent
func (s *ToolComponent) WithSemanticVersion(semanticVersion string) *ToolComponent {
s.SemanticVersion = &semanticVersion
return s
}
// WithShortDescription - add a ShortDescription to the ToolComponent
func (s *ToolComponent) WithShortDescription(shortDescription *MultiformatMessageString) *ToolComponent {
s.ShortDescription = shortDescription
return s
}
// WithSupportedTaxonomies - add SupportedTaxonomies to the ToolComponent
func (s *ToolComponent) WithSupportedTaxonomies(supportedTaxonomies []*ToolComponentReference) *ToolComponent {
s.SupportedTaxonomies = supportedTaxonomies
return s
}
// AddSupportedTaxonomie - add a single supported taxonomy to the ToolComponent
func (s *ToolComponent) AddSupportedTaxonomie(supportedTaxonomie *ToolComponentReference) *ToolComponent {
s.SupportedTaxonomies = append(s.SupportedTaxonomies, supportedTaxonomie)
return s
}
// WithTaxa - add Taxa to the ToolComponent
func (t *ToolComponent) WithTaxa(taxa []*ReportingDescriptor) *ToolComponent {
t.Taxa = taxa
return t
}
// AddTaxa - add a single Taxa entry to the ToolComponent
func (t *ToolComponent) AddTaxa(taxa *ReportingDescriptor) *ToolComponent {
t.Taxa = append(t.Taxa, taxa)
return t
}
// WithTranslationMetadata - add a TranslationMetadata to the ToolComponent
func (t *ToolComponent) WithTranslationMetadata(translationMetadata *TranslationMetadata) *ToolComponent {
t.TranslationMetadata = translationMetadata
return t
}
// WithVersion - add a Version to the ToolComponent
func (v *ToolComponent) WithVersion(version string) *ToolComponent {
v.Version = &version
return v
}
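
// Editor's note: illustrative sketch only, not part of the generated file; every
// string below is a placeholder value.
func sketchToolComponent() *ToolComponent {
	return NewToolComponent().
		WithName("example-driver").
		WithSemanticVersion("1.2.3").
		WithInformationURI("https://example.com/docs").
		WithOrganization("Example Org")
}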

View file

@ -0,0 +1,47 @@
package sarif
// ToolComponentReference - Identifies a particular toolComponent object, either the driver or an extension.
type ToolComponentReference struct {
// The 'guid' property of the referenced toolComponent.
GuID *string `json:"guid,omitempty"`
// An index into the referenced toolComponent in tool.extensions.
Index int `json:"index"`
// The 'name' property of the referenced toolComponent.
Name *string `json:"name,omitempty"`
// Key/value pairs that provide additional information about the toolComponentReference.
Properties *PropertyBag `json:"properties,omitempty"`
}
// NewToolComponentReference - creates a new ToolComponentReference
func NewToolComponentReference() *ToolComponentReference {
return &ToolComponentReference{
Index: -1,
}
}
// WithGuID - add a GuID to the ToolComponentReference
func (g *ToolComponentReference) WithGuID(guid string) *ToolComponentReference {
g.GuID = &guid
return g
}
// WithIndex - add an Index to the ToolComponentReference
func (i *ToolComponentReference) WithIndex(index int) *ToolComponentReference {
i.Index = index
return i
}
// WithName - add a Name to the ToolComponentReference
func (n *ToolComponentReference) WithName(name string) *ToolComponentReference {
n.Name = &name
return n
}
// WithProperties - add Properties to the ToolComponentReference
func (p *ToolComponentReference) WithProperties(properties *PropertyBag) *ToolComponentReference {
p.Properties = properties
return p
}
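
// Editor's note: illustrative sketch only, not part of the generated file; the name
// and index are placeholder values referring to a hypothetical extension.
func sketchToolComponentReference() *ToolComponentReference {
	return NewToolComponentReference().
		WithName("example-extension").
		WithIndex(0)
}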

View file

@ -0,0 +1,72 @@
package sarif
// TranslationMetadata - Provides additional metadata related to translation.
type TranslationMetadata struct {
// The absolute URI from which the translation metadata can be downloaded.
DownloadURI *string `json:"downloadUri,omitempty"`
// A comprehensive description of the translation metadata.
FullDescription *MultiformatMessageString `json:"fullDescription,omitempty"`
// The full name associated with the translation metadata.
FullName *string `json:"fullName,omitempty"`
// The absolute URI from which information related to the translation metadata can be downloaded.
InformationURI *string `json:"informationUri,omitempty"`
// The name associated with the translation metadata.
Name *string `json:"name,omitempty"`
// Key/value pairs that provide additional information about the translation metadata.
Properties *PropertyBag `json:"properties,omitempty"`
// A brief description of the translation metadata.
ShortDescription *MultiformatMessageString `json:"shortDescription,omitempty"`
}
// NewTranslationMetadata - creates a new TranslationMetadata
func NewTranslationMetadata() *TranslationMetadata {
return &TranslationMetadata{}
}
// WithDownloadURI - add a DownloadURI to the TranslationMetadata
func (d *TranslationMetadata) WithDownloadURI(downloadUri string) *TranslationMetadata {
d.DownloadURI = &downloadUri
return d
}
// WithFullDescription - add a FullDescription to the TranslationMetadata
func (f *TranslationMetadata) WithFullDescription(fullDescription *MultiformatMessageString) *TranslationMetadata {
f.FullDescription = fullDescription
return f
}
// WithFullName - add a FullName to the TranslationMetadata
func (f *TranslationMetadata) WithFullName(fullName string) *TranslationMetadata {
f.FullName = &fullName
return f
}
// WithInformationURI - add an InformationURI to the TranslationMetadata
func (i *TranslationMetadata) WithInformationURI(informationUri string) *TranslationMetadata {
i.InformationURI = &informationUri
return i
}
// WithName - add a Name to the TranslationMetadata
func (n *TranslationMetadata) WithName(name string) *TranslationMetadata {
n.Name = &name
return n
}
// WithProperties - add Properties to the TranslationMetadata
func (p *TranslationMetadata) WithProperties(properties *PropertyBag) *TranslationMetadata {
p.Properties = properties
return p
}
// WithShortDescription - add a ShortDescription to the TranslationMetadata
func (s *TranslationMetadata) WithShortDescription(shortDescription *MultiformatMessageString) *TranslationMetadata {
s.ShortDescription = shortDescription
return s
}
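
// Editor's note: illustrative sketch only, not part of the generated file; the name
// and download URI are placeholder values.
func sketchTranslationMetadata() *TranslationMetadata {
	return NewTranslationMetadata().
		WithName("example-translation").
		WithDownloadURI("https://example.com/translation.json")
}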

View file

@ -0,0 +1,72 @@
package sarif
// VersionControlDetails - Specifies the information necessary to retrieve a desired revision from a version control system.
type VersionControlDetails struct {
// A Coordinated Universal Time (UTC) date and time that can be used to synchronize an enlistment to the state of the repository at that time.
AsOfTimeUtc *string `json:"asOfTimeUtc,omitempty"`
// The name of a branch containing the revision.
Branch *string `json:"branch,omitempty"`
// The location in the local file system to which the root of the repository was mapped at the time of the analysis.
MappedTo *ArtifactLocation `json:"mappedTo,omitempty"`
// Key/value pairs that provide additional information about the version control details.
Properties *PropertyBag `json:"properties,omitempty"`
// The absolute URI of the repository.
RepositoryURI *string `json:"repositoryUri,omitempty"`
// A string that uniquely and permanently identifies the revision within the repository.
RevisionID *string `json:"revisionId,omitempty"`
// A tag that has been applied to the revision.
RevisionTag *string `json:"revisionTag,omitempty"`
}
// NewVersionControlDetails - creates a new VersionControlDetails
func NewVersionControlDetails() *VersionControlDetails {
return &VersionControlDetails{}
}
// WithAsOfTimeUtc - add an AsOfTimeUtc to the VersionControlDetails
func (a *VersionControlDetails) WithAsOfTimeUtc(asOfTimeUtc string) *VersionControlDetails {
a.AsOfTimeUtc = &asOfTimeUtc
return a
}
// WithBranch - add a Branch to the VersionControlDetails
func (b *VersionControlDetails) WithBranch(branch string) *VersionControlDetails {
b.Branch = &branch
return b
}
// WithMappedTo - add a MappedTo to the VersionControlDetails
func (m *VersionControlDetails) WithMappedTo(mappedTo *ArtifactLocation) *VersionControlDetails {
m.MappedTo = mappedTo
return m
}
// WithProperties - add Properties to the VersionControlDetails
func (p *VersionControlDetails) WithProperties(properties *PropertyBag) *VersionControlDetails {
p.Properties = properties
return p
}
// WithRepositoryURI - add a RepositoryURI to the VersionControlDetails
func (r *VersionControlDetails) WithRepositoryURI(repositoryUri string) *VersionControlDetails {
r.RepositoryURI = &repositoryUri
return r
}
// WithRevisionID - add a RevisionID to the VersionControlDetails
func (r *VersionControlDetails) WithRevisionID(revisionId string) *VersionControlDetails {
r.RevisionID = &revisionId
return r
}
// WithRevisionTag - add a RevisionTag to the VersionControlDetails
func (r *VersionControlDetails) WithRevisionTag(revisionTag string) *VersionControlDetails {
r.RevisionTag = &revisionTag
return r
}
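
// Editor's note: illustrative sketch only, not part of the generated file; the
// repository URI, branch and revision are placeholder values.
func sketchVersionControlDetails() *VersionControlDetails {
	return NewVersionControlDetails().
		WithRepositoryURI("https://example.com/example/repo").
		WithBranch("main").
		WithRevisionID("abc123")
}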
