diff --git a/conformance2/.gitignore b/conformance2/.gitignore
new file mode 100644
index 00000000..cac97d18
--- /dev/null
+++ b/conformance2/.gitignore
@@ -0,0 +1,6 @@
+!go.mod
+!go.sum
+conformance
+conformance.test
+oci-conformance.yaml
+results/**
diff --git a/conformance2/Dockerfile b/conformance2/Dockerfile
new file mode 100644
index 00000000..23bf1dfe
--- /dev/null
+++ b/conformance2/Dockerfile
@@ -0,0 +1,10 @@
+FROM golang:1.24-alpine AS build
+
+COPY . .
+RUN CGO_ENABLED=0 go build -o /usr/local/bin/conformance .
+ENTRYPOINT [ "/usr/local/bin/conformance" ]
+
+FROM scratch
+COPY --from=build /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ca-certificates.crt
+COPY --from=build /usr/local/bin/conformance /conformance
+ENTRYPOINT [ "/conformance" ]
diff --git a/conformance2/README.md b/conformance2/README.md
new file mode 100644
index 00000000..f2358659
--- /dev/null
+++ b/conformance2/README.md
@@ -0,0 +1,171 @@
+# OCI Distribution Spec Conformance Test
+
+The distribution-spec conformance test verifies that the various HTTP endpoints on a registry generate appropriate responses and handle different types of data.
+
+## Configuration
+
+The test is configured with either a yaml configuration file or environment variables.
+When a setting is configured by multiple sources, the precedence from highest to lowest is environment variables, then the yaml configuration file, and lastly any legacy environment variables.
+
+Most registries can be tested by setting the registry, repository, and login credentials.
+For APIs that return a valid "unsupported" response code, the test attempts to track the missing feature without requiring the test to be manually disabled.
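+
+For example, a registry with basic authentication can typically be tested with only the following settings (a minimal sketch; the registry address and credentials are placeholders to substitute for your environment):
+
+```shell
+export OCI_REGISTRY="registry.example.org:5000"
+export OCI_REPO1="conformance/repo1"
+export OCI_REPO2="conformance/repo2"
+export OCI_USERNAME="conformance-user"
+export OCI_PASSWORD="conformance-pass"
+```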
+
+### Environment Variables
+
+Environment variables can be used to set any configuration setting in the conformance test.
+The available variables and their default values are listed here:
+
+```shell
+# several variables are used to configure the overall conformance test process
+export OCI_CONFIGURATION="oci-conformance.yaml" # see Yaml Configuration File below
+export OCI_RESULTS_DIR="./results" # output of the conformance test will be written here, see Results below
+export OCI_VERSION="1.1" # distribution-spec version to test against, this adjusts default values for the API tests
+export OCI_LOG=warn # adjust logging threshold: debug, info, warn, error (this does not affect the generated reports)
+
+# the registry settings typically need to be configured
+export OCI_REGISTRY="localhost:5000"
+export OCI_TLS="enabled" # enabled (https), insecure (self-signed), or disabled (http)
+export OCI_REPO1="conformance/repo1"
+export OCI_REPO2="conformance/repo2"
+export OCI_USERNAME=
+export OCI_PASSWORD=
+
+# API settings with their 1.1 default values can be used to skip specific requests
+export OCI_API_PULL=true
+export OCI_API_PUSH=true # to disable push requests, see the OCI_RO_DATA variables below
+export OCI_API_BLOBS_ATOMIC=true # whether blob delete operations should be immediate
+export OCI_API_BLOBS_DELETE=true
+export OCI_API_BLOBS_MOUNT_ANONYMOUS=true # attempt to mount a blob without a source repository
+export OCI_API_MANIFESTS_ATOMIC=true # whether manifest delete operations should be immediate
+export OCI_API_MANIFESTS_DELETE=true
+export OCI_API_TAGS_ATOMIC=true # whether tag delete operations should be immediate
+export OCI_API_TAGS_DELETE=true
+export OCI_API_TAGS_LIST=true
+export OCI_API_REFERRER=true
+
+# Data settings are used to generate a variety of OCI content
+export OCI_DATA_IMAGE=true # note, this must be left enabled for any tests to run
+export OCI_DATA_INDEX=true
+export OCI_DATA_INDEX_LIST=true # an index containing a nested index
+export OCI_DATA_SPARSE=false # an index containing references to manifests that have not been pushed
+export OCI_DATA_ARTIFACT=true # an OCI artifact packaged as an image with an artifactType
+export OCI_DATA_SUBJECT=true # an OCI image with the subject field defined
+export OCI_DATA_SUBJECT_MISSING=true # pushes content with a subject referencing a non-existent digest
+export OCI_DATA_ARTIFACT_LIST=true # an OCI index with an artifactType
+export OCI_DATA_SUBJECT_LIST=true # an OCI index with the subject field defined
+export OCI_DATA_DATA_FIELD=true # descriptors with the data field populated
+export OCI_DATA_NONDISTRIBUTABLE=true # an OCI image containing nondistributable layer references that have not been pushed
+export OCI_DATA_CUSTOM_FIELDS=true # manifests and config json with additional fields
+export OCI_DATA_EMPTY_BLOB=true # zero byte blob
+export OCI_DATA_SHA512=true # content pushed using the sha512 digest algorithm
+
+# For testing read-only registries, images must be preloaded.
+# OCI_API_PUSH=false must be set, and disabling the DELETE APIs is recommended (see the example after this code block).
+# All requests are performed against the OCI_REPO1 repository.
+export OCI_RO_DATA_TAGS= # space separated list of tags
+export OCI_RO_DATA_MANIFESTS= # space separated list of manifest digests
+export OCI_RO_DATA_BLOBS= # space separated list of blob digests
+export OCI_RO_DATA_REFERRERS= # space separated list of subject digests for the referrers API
+```
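+
+For example, a read-only registry preloaded with a single image might be configured as follows (a sketch; the tag and digests are placeholders for content that already exists in the OCI_REPO1 repository):
+
+```shell
+export OCI_API_PUSH=false
+export OCI_API_BLOBS_DELETE=false
+export OCI_API_MANIFESTS_DELETE=false
+export OCI_API_TAGS_DELETE=false
+export OCI_RO_DATA_TAGS="v1"
+export OCI_RO_DATA_MANIFESTS="sha256:<manifest-digest>"
+export OCI_RO_DATA_BLOBS="sha256:<config-digest> sha256:<layer-digest>"
+```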
+
+### Yaml Configuration File
+
+The conformance test loads `oci-conformance.yaml` by default; a different file can be specified with the `OCI_CONFIGURATION` environment variable.
+
+The default yaml configuration is shown below and matches the environment variables described above:
+
+```yaml
+resultsDir: ./results
+version: "1.1"
+registry: localhost:5000
+tls: enabled
+repo1: conformance/repo1
+repo2: conformance/repo2
+username: ""
+password: ""
+logging: warn
+apis:
+  pull: true
+  push: true
+  blobs:
+    atomic: true
+    delete: true
+    mountAnonymous: true
+  manifests:
+    atomic: true
+    delete: true
+  tags:
+    atomic: true
+    delete: true
+    list: true
+  referrer: true
+data:
+  image: true
+  index: true
+  indexList: true
+  sparse: false
+  artifact: true
+  subject: true
+  subjectMissing: true
+  artifactList: true
+  subjectList: true
+  dataField: true
+  nondistributable: true
+  customFields: true
+  emptyBlob: true
+  sha512: true
+roData:
+  tags: []
+  manifests: []
+  blobs: []
+  referrers: []
+```
+
+## Running the Test
+
+The test can be run with Go, Docker, or GitHub Actions.
+
+### Go
+
+The tests require Go 1.24 or greater.
+
+They can be run directly with:
+
+```shell
+go run -buildvcs=true .
+```
+
+Or to compile and run separately:
+
+```shell
+go build -o conformance .
+./conformance
+```
+
+### Docker
+
+First configure the test with environment variables or a configuration file as described above.
+Then build and run the conformance test with a command similar to the one below:
+
+```shell
+docker build -t conformance .
+docker run -it --rm --net=host \
+ -u "$(id -u):$(id -g)" \
+ -v "$(pwd)/results:/results" \
+ -e OCI_REGISTRY -e OCI_TLS -e OCI_REPO1 -e OCI_REPO2 -e OCI_USERNAME -e OCI_PASSWORD -e OCI_VERSION \
+ conformance:latest
+```
+
+Additional environment variables can be specified as needed, or the `oci-conformance.yaml` file can be mounted as a volume at `/oci-conformance.yaml` inside the container, as shown below.
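+
+For example, a run driven entirely by the configuration file might look like the following (a sketch assuming `oci-conformance.yaml` is in the current directory and `results` is writable):
+
+```shell
+docker run -it --rm --net=host \
+  -u "$(id -u):$(id -g)" \
+  -v "$(pwd)/oci-conformance.yaml:/oci-conformance.yaml:ro" \
+  -v "$(pwd)/results:/results" \
+  conformance:latest
+```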
+
+### GitHub Actions
+
+TODO
+
+## Results
+
+A summary of the test is output to the screen along with any logging.
+The results directory (`results` by default) is populated with the following files:
+
+- `report.html`: Full report of the test, including the redacted output of each request and response.
+- `junit.xml`: JUnit report.
diff --git a/conformance2/api.go b/conformance2/api.go
new file mode 100644
index 00000000..924bbeda
--- /dev/null
+++ b/conformance2/api.go
@@ -0,0 +1,1226 @@
+package main
+
+import (
+ "bytes"
+ "encoding/base64"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "maps"
+ "net/http"
+ "net/url"
+ "regexp"
+ "slices"
+ "strconv"
+ "strings"
+
+ specs "github.com/opencontainers/distribution-spec/specs-go/v1"
+ digest "github.com/opencontainers/go-digest"
+ image "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+var emptyDigest = digest.Canonical.FromBytes([]byte{})
+
+type api struct {
+ client *http.Client
+ user, pass string
+}
+
+type apiOpt func(*api)
+
+func apiNew(client *http.Client, opts ...apiOpt) *api {
+ a := &api{
+ client: client,
+ }
+ for _, opt := range opts {
+ opt(a)
+ }
+ return a
+}
+
+func apiWithAuth(user, pass string) apiOpt {
+ return func(a *api) {
+ a.user = user
+ a.pass = pass
+ }
+}
+
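+// apiDoOpt is a composable option for Do and the API helpers: reqFn mutates
+// the outgoing request, respFn validates or captures the response, out
+// receives a redacted transcript of the request and response, and flags
+// adjust the behavior of higher level API helpers.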
+type apiDoOpt struct {
+ reqFn func(*http.Request) error
+ respFn func(*http.Response) error
+ out io.Writer
+ flags map[string]bool
+}
+
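+// Do builds a single request from the provided options, applies each request
+// function, sends the request (retrying once with an Authorization header when
+// the registry responds with a 401), and then runs every response function
+// against the result, joining any errors that are returned.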
+func (a *api) Do(opts ...apiDoOpt) error {
+ errs := []error{}
+ reqFns := []func(*http.Request) error{}
+ respFns := []func(*http.Response) error{}
+ var out io.Writer
+ for _, opt := range opts {
+ if opt.reqFn != nil {
+ reqFns = append(reqFns, opt.reqFn)
+ }
+ if opt.respFn != nil {
+ respFns = append(respFns, opt.respFn)
+ }
+ if opt.out != nil {
+ out = opt.out
+ }
+ }
+ req, err := http.NewRequest(http.MethodGet, "", nil)
+ if err != nil {
+ return err
+ }
+ for _, reqFn := range reqFns {
+ err := reqFn(req)
+ if err != nil {
+ errs = append(errs, err)
+ }
+ }
+ if len(errs) == 1 {
+ return errs[0]
+ } else if len(errs) > 1 {
+ return errors.Join(errs...)
+ }
+ if out != nil {
+ out = redactWriter{w: out}
+ }
+ wt := &wrapTransport{out: out, orig: a.client.Transport}
+ if a.client.Transport == nil {
+ wt.orig = http.DefaultTransport
+ }
+ c := *a.client
+ c.Transport = wt
+ resp, err := c.Do(req)
+ if err != nil {
+ return err
+ }
+ // on auth failures, generate the auth header and retry
+ if resp.StatusCode == http.StatusUnauthorized {
+ auth, err := a.getAuthHeader(c, resp)
+ if err != nil {
+ errs = append(errs, err)
+ }
+ if err == nil && auth != "" {
+ req.Header.Set("Authorization", auth)
+ if req.GetBody != nil {
+ req.Body, err = req.GetBody()
+ if err != nil {
+ return fmt.Errorf("failed to reset body after auth request: %w", err)
+ }
+ }
+ resp, err = c.Do(req)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ for _, respFn := range respFns {
+ err := respFn(resp)
+ if err != nil {
+ errs = append(errs, err)
+ }
+ }
+ if len(errs) == 1 {
+ return errs[0]
+ } else if len(errs) > 1 {
+ return errors.Join(errs...)
+ }
+ return nil
+}
+
+func (a *api) GetFlags(opts ...apiDoOpt) map[string]bool {
+ ret := map[string]bool{}
+ for _, opt := range opts {
+ maps.Copy(ret, opt.flags)
+ }
+ return ret
+}
+
+func (a *api) BlobDelete(registry, repo string, dig digest.Digest, td *testData, opts ...apiDoOpt) error {
+ u, err := url.Parse(registry + "/v2/" + repo + "/blobs/" + dig.String())
+ if err != nil {
+ return err
+ }
+ var status int
+ err = a.Do(apiWithAnd(opts),
+ apiWithMethod("DELETE"),
+ apiWithURL(u),
+ apiExpectStatus(http.StatusAccepted, http.StatusNotFound, http.StatusMethodNotAllowed),
+ apiReturnStatus(&status),
+ )
+ if err != nil {
+ return fmt.Errorf("blob delete failed: %v", err)
+ }
+ if status == http.StatusMethodNotAllowed {
+ return fmt.Errorf("registry returned status %d%.0w", status, ErrRegUnsupported)
+ }
+ return nil
+}
+
+func (a *api) BlobGetReq(registry, repo string, dig digest.Digest, td *testData, opts ...apiDoOpt) error {
+ u, err := url.Parse(registry + "/v2/" + repo + "/blobs/" + dig.String())
+ if err != nil {
+ return err
+ }
+ err = a.Do(apiWithAnd(opts),
+ apiWithMethod("GET"),
+ apiWithURL(u),
+ )
+ if err != nil {
+ return fmt.Errorf("blob get failed: %v", err)
+ }
+ return nil
+}
+
+func (a *api) BlobGetExistsFull(registry, repo string, dig digest.Digest, td *testData, opts ...apiDoOpt) error {
+ opts = append(opts,
+ apiExpectStatus(http.StatusOK),
+ )
+ if val, ok := td.blobs[dig]; ok && (len(val) > 0 || dig == emptyDigest) {
+ opts = append(opts, apiExpectBody(val), apiExpectHeader("Content-Length", fmt.Sprintf("%d", len(val))))
+ }
+ return a.BlobGetReq(registry, repo, dig, td, opts...)
+}
+
+func (a *api) BlobHeadReq(registry, repo string, dig digest.Digest, td *testData, opts ...apiDoOpt) error {
+ u, err := url.Parse(registry + "/v2/" + repo + "/blobs/" + dig.String())
+ if err != nil {
+ return err
+ }
+ err = a.Do(apiWithAnd(opts),
+ apiWithMethod("HEAD"),
+ apiWithURL(u),
+ )
+ if err != nil {
+ return fmt.Errorf("blob head failed: %v", err)
+ }
+ return nil
+}
+
+func (a *api) BlobHeadExists(registry, repo string, dig digest.Digest, td *testData, opts ...apiDoOpt) error {
+ opts = append(opts,
+ apiExpectStatus(http.StatusOK),
+ apiExpectBody([]byte{}),
+ )
+ if val, ok := td.blobs[dig]; ok && (len(val) > 0 || dig == emptyDigest) {
+ opts = append(opts, apiExpectHeader("Content-Length", fmt.Sprintf("%d", len(val))))
+ }
+ return a.BlobHeadReq(registry, repo, dig, td, opts...)
+}
+
+func (a *api) BlobMount(registry, repo, source string, dig digest.Digest, td *testData, opts ...apiDoOpt) error {
+ bodyBytes, ok := td.blobs[dig]
+ if !ok {
+ return fmt.Errorf("BlobMount missing expected digest to send: %s%.0w", dig.String(), errTestAPIError)
+ }
+ u, err := url.Parse(registry + "/v2/" + repo + "/blobs/uploads/")
+ if err != nil {
+ return err
+ }
+ qa := u.Query()
+ qa.Set("mount", dig.String())
+ if source != "" {
+ qa.Set("from", source)
+ }
+ u.RawQuery = qa.Encode()
+ // TODO: add digest algorithm if not sha256
+ loc := ""
+ status := 0
+ err = a.Do(apiWithAnd(opts),
+ apiWithMethod("POST"),
+ apiWithURL(u),
+ apiExpectStatus(http.StatusCreated, http.StatusAccepted),
+ apiReturnHeader("Location", &loc),
+ apiReturnStatus(&status),
+ )
+ if err != nil {
+ return fmt.Errorf("blob post failed: %v", err)
+ }
+ if loc == "" {
+ return fmt.Errorf("blob post did not return a location")
+ }
+ if status == http.StatusCreated {
+ // successful mount
+ return nil
+ }
+ // fallback to post+put
+ u, err = u.Parse(loc)
+ if err != nil {
+ return fmt.Errorf("blob post could not parse location header: %v", err)
+ }
+ qa = u.Query()
+ qa.Set("digest", dig.String())
+ u.RawQuery = qa.Encode()
+ err = a.Do(apiWithAnd(opts),
+ apiWithMethod("PUT"),
+ apiWithURL(u),
+ apiWithContentLength(int64(len(bodyBytes))),
+ apiWithHeaderAdd("Content-Type", "application/octet-stream"),
+ apiWithBody(bodyBytes),
+ apiExpectStatus(http.StatusCreated),
+ apiExpectHeader("Location", ""),
+ )
+ if err != nil {
+ return fmt.Errorf("blob put failed: %v", err)
+ }
+ return fmt.Errorf("registry returned status %d, fell back to blob POST+PUT%.0w", status, ErrRegUnsupported)
+}
+
+func (a *api) BlobPatchChunked(registry, repo string, dig digest.Digest, td *testData, opts ...apiDoOpt) error {
+ flags := a.GetFlags(opts...)
+ bodyBytes, ok := td.blobs[dig]
+ if !ok {
+ return fmt.Errorf("BlobPatchChunked missing expected digest to send: %s%.0w", dig.String(), errTestAPIError)
+ }
+ u, err := url.Parse(registry + "/v2/" + repo + "/blobs/uploads/")
+ if err != nil {
+ return err
+ }
+ // TODO: add digest algorithm if not sha256
+ minStr := ""
+ loc := ""
+ err = a.Do(apiWithAnd(opts),
+ apiWithMethod("POST"),
+ apiWithURL(u),
+ apiWithContentLength(0),
+ apiExpectStatus(http.StatusAccepted),
+ apiReturnHeader("OCI-Chunk-Min-Length", &minStr),
+ apiReturnHeader("Location", &loc),
+ )
+ if err != nil {
+ return fmt.Errorf("blob post failed: %v", err)
+ }
+ // calc chunk size to make 3 chunks, adjust to min chunk size if specified
+ chunkSize := len(bodyBytes)/3 + 1
+ if minStr != "" {
+ minSize, err := strconv.Atoi(minStr)
+ if err != nil {
+ return fmt.Errorf("parsing OCI-Chunk-Min-Length size %q failed: %v", minStr, err)
+ }
+ if minSize > chunkSize {
+ chunkSize = minSize
+ }
+ }
+ if chunkSize < chunkMin {
+ chunkSize = chunkMin
+ }
+ if chunkSize > len(bodyBytes) {
+ chunkSize = len(bodyBytes)
+ }
+ lastByte := -1
+ // loop over the number of chunks
+ for lastByte < len(bodyBytes)-1 {
+ if loc == "" {
+ return fmt.Errorf("blob request did not return a location")
+ }
+ u, err = u.Parse(loc)
+ if err != nil {
+ return fmt.Errorf("blob request could not parse location header: %v", err)
+ }
+ start := lastByte + 1
+ lastByte = min(start+chunkSize-1, len(bodyBytes)-1)
+ method := "PATCH"
+ expStatus := http.StatusAccepted
+ if flags["PutLastChunk"] && lastByte == len(bodyBytes)-1 {
+ method = "PUT"
+ qa := u.Query()
+ qa.Set("digest", dig.String())
+ u.RawQuery = qa.Encode()
+ if flags["ExpectBadDigest"] {
+ expStatus = http.StatusBadRequest
+ } else {
+ expStatus = http.StatusCreated
+ }
+ }
+ err = a.Do(apiWithAnd(opts),
+ apiWithMethod(method),
+ apiWithURL(u),
+ apiWithContentLength(int64(lastByte-start+1)),
+ apiWithHeaderAdd("Content-Type", "application/octet-stream"),
+ apiWithHeaderAdd("Content-Range", fmt.Sprintf("%d-%d", start, lastByte)),
+ apiWithBody(bodyBytes[start:lastByte+1]),
+ apiExpectStatus(expStatus),
+ apiReturnHeader("Location", &loc),
+ )
+ if err != nil {
+ return fmt.Errorf("blob patch failed: %v", err)
+ }
+ }
+ if !flags["PutLastChunk"] {
+ if loc == "" {
+ return fmt.Errorf("blob patch did not return a location")
+ }
+ u, err = u.Parse(loc)
+ if err != nil {
+ return fmt.Errorf("blob patch could not parse location header: %v", err)
+ }
+ qa := u.Query()
+ qa.Set("digest", dig.String())
+ u.RawQuery = qa.Encode()
+ if flags["ExpectBadDigest"] {
+ opts = append(opts,
+ apiExpectStatus(http.StatusBadRequest),
+ )
+ } else {
+ opts = append(opts,
+ apiExpectStatus(http.StatusCreated),
+ apiExpectHeader("Location", ""),
+ )
+ }
+ err = a.Do(apiWithAnd(opts),
+ apiWithMethod("PUT"),
+ apiWithURL(u),
+ apiWithContentLength(0),
+ apiWithHeaderAdd("Content-Type", "application/octet-stream"),
+ )
+ if err != nil {
+ return fmt.Errorf("blob put failed: %v", err)
+ }
+ }
+ return nil
+}
+
+func (a *api) BlobPatchStream(registry, repo string, dig digest.Digest, td *testData, opts ...apiDoOpt) error {
+ flags := a.GetFlags(opts...)
+ bodyBytes, ok := td.blobs[dig]
+ if !ok {
+ return fmt.Errorf("BlobPatchStream missing expected digest to send: %s%.0w", dig.String(), errTestAPIError)
+ }
+ u, err := url.Parse(registry + "/v2/" + repo + "/blobs/uploads/")
+ if err != nil {
+ return err
+ }
+ // TODO: add digest algorithm if not sha256
+ loc := ""
+ err = a.Do(apiWithAnd(opts),
+ apiWithMethod("POST"),
+ apiWithURL(u),
+ apiWithContentLength(0),
+ apiExpectStatus(http.StatusAccepted),
+ apiReturnHeader("Location", &loc),
+ )
+ if err != nil {
+ return fmt.Errorf("blob post failed: %v", err)
+ }
+ if loc == "" {
+ return fmt.Errorf("blob post did not return a location")
+ }
+ u, err = u.Parse(loc)
+ if err != nil {
+ return fmt.Errorf("blob post could not parse location header: %v", err)
+ }
+ err = a.Do(apiWithAnd(opts),
+ apiWithMethod("PATCH"),
+ apiWithURL(u),
+ apiWithHeaderAdd("Content-Type", "application/octet-stream"),
+ apiWithBody(bodyBytes),
+ apiExpectStatus(http.StatusAccepted),
+ apiReturnHeader("Location", &loc),
+ )
+ if err != nil {
+ return fmt.Errorf("blob patch failed: %v", err)
+ }
+ if loc == "" {
+ return fmt.Errorf("blob patch did not return a location")
+ }
+ u, err = u.Parse(loc)
+ if err != nil {
+ return fmt.Errorf("blob patch could not parse location header: %v", err)
+ }
+ qa := u.Query()
+ qa.Set("digest", dig.String())
+ u.RawQuery = qa.Encode()
+ if flags["ExpectBadDigest"] {
+ opts = append(opts,
+ apiExpectStatus(http.StatusBadRequest),
+ )
+ } else {
+ opts = append(opts,
+ apiExpectStatus(http.StatusCreated),
+ apiExpectHeader("Location", ""),
+ )
+ }
+ err = a.Do(apiWithAnd(opts),
+ apiWithMethod("PUT"),
+ apiWithURL(u),
+ apiWithContentLength(0),
+ apiWithHeaderAdd("Content-Type", "application/octet-stream"),
+ )
+ if err != nil {
+ return fmt.Errorf("blob put failed: %v", err)
+ }
+ return nil
+}
+
+func (a *api) BlobPostOnly(registry, repo string, dig digest.Digest, td *testData, opts ...apiDoOpt) error {
+ flags := a.GetFlags(opts...)
+ bodyBytes, ok := td.blobs[dig]
+ if !ok {
+ return fmt.Errorf("BlobPostOnly missing expected digest to send: %s%.0w", dig.String(), errTestAPIError)
+ }
+ u, err := url.Parse(registry + "/v2/" + repo + "/blobs/uploads/")
+ if err != nil {
+ return err
+ }
+ qa := u.Query()
+ qa.Set("digest", dig.String())
+ u.RawQuery = qa.Encode()
+ loc := ""
+ var status int
+ var postOpts []apiDoOpt
+ if flags["ExpectBadDigest"] {
+ postOpts = append([]apiDoOpt{
+ apiExpectStatus(http.StatusBadRequest, http.StatusAccepted),
+ }, opts...)
+ } else {
+ postOpts = append([]apiDoOpt{
+ apiExpectStatus(http.StatusCreated, http.StatusAccepted),
+ apiExpectHeader("Location", ""),
+ }, opts...)
+ }
+ err = a.Do(apiWithAnd(postOpts),
+ apiWithMethod("POST"),
+ apiWithURL(u),
+ apiWithContentLength(int64(len(bodyBytes))),
+ apiWithHeaderAdd("Content-Type", "application/octet-stream"),
+ apiWithBody(bodyBytes),
+ apiReturnStatus(&status),
+ apiReturnHeader("Location", &loc),
+ )
+ if err != nil {
+ return fmt.Errorf("blob post failed: %v", err)
+ }
+ if status == http.StatusAccepted {
+ // fallback to a PUT request, but track the unsupported API
+ var putOpts []apiDoOpt
+ if flags["ExpectBadDigest"] {
+ putOpts = append([]apiDoOpt{
+ apiExpectStatus(http.StatusBadRequest),
+ }, opts...)
+ } else {
+ putOpts = append([]apiDoOpt{
+ apiExpectStatus(http.StatusCreated),
+ apiExpectHeader("Location", ""),
+ }, opts...)
+ }
+ u, err = u.Parse(loc)
+ if err != nil {
+ return fmt.Errorf("blob post could not parse location header: %v", err)
+ }
+ qa := u.Query()
+ qa.Set("digest", dig.String())
+ u.RawQuery = qa.Encode()
+ err = a.Do(apiWithAnd(putOpts),
+ apiWithMethod("PUT"),
+ apiWithURL(u),
+ apiWithContentLength(int64(len(bodyBytes))),
+ apiWithHeaderAdd("Content-Type", "application/octet-stream"),
+ apiWithBody(bodyBytes),
+ )
+ if err != nil {
+ return fmt.Errorf("blob put failed: %v", err)
+ }
+ return fmt.Errorf("registry does not support content in the POST, fell back to PUT%.0w", ErrRegUnsupported)
+ }
+ return nil
+}
+
+func (a *api) BlobPostPut(registry, repo string, dig digest.Digest, td *testData, opts ...apiDoOpt) error {
+ flags := a.GetFlags(opts...)
+ bodyBytes, ok := td.blobs[dig]
+ if !ok {
+ return fmt.Errorf("BlobPostPut missing expected digest to send: %s%.0w", dig.String(), errTestAPIError)
+ }
+ u, err := url.Parse(registry + "/v2/" + repo + "/blobs/uploads/")
+ if err != nil {
+ return err
+ }
+ // TODO: add digest algorithm if not sha256
+ loc := ""
+ err = a.Do(apiWithAnd(opts),
+ apiWithMethod("POST"),
+ apiWithURL(u),
+ apiExpectStatus(http.StatusAccepted),
+ apiReturnHeader("Location", &loc),
+ )
+ if err != nil {
+ return fmt.Errorf("blob post failed: %v", err)
+ }
+ if loc == "" {
+ return fmt.Errorf("blob post did not return a location")
+ }
+ u, err = u.Parse(loc)
+ if err != nil {
+ return fmt.Errorf("blob post could not parse location header: %v", err)
+ }
+ qa := u.Query()
+ qa.Set("digest", dig.String())
+ u.RawQuery = qa.Encode()
+ if flags["ExpectBadDigest"] {
+ opts = append(opts,
+ apiExpectStatus(http.StatusBadRequest),
+ )
+ } else {
+ opts = append(opts,
+ apiExpectStatus(http.StatusCreated),
+ apiExpectHeader("Location", ""),
+ )
+ }
+ err = a.Do(apiWithAnd(opts),
+ apiWithMethod("PUT"),
+ apiWithURL(u),
+ apiWithContentLength(int64(len(bodyBytes))),
+ apiWithHeaderAdd("Content-Type", "application/octet-stream"),
+ apiWithBody(bodyBytes),
+ )
+ if err != nil {
+ return fmt.Errorf("blob put failed: %v", err)
+ }
+ return nil
+}
+
+func (a *api) ManifestDelete(registry, repo, ref string, dig digest.Digest, td *testData, opts ...apiDoOpt) error {
+ u, err := url.Parse(registry + "/v2/" + repo + "/manifests/" + ref)
+ if err != nil {
+ return err
+ }
+ var status int
+ err = a.Do(apiWithAnd(opts),
+ apiWithMethod("DELETE"),
+ apiWithURL(u),
+ apiExpectStatus(http.StatusAccepted, http.StatusNotFound, http.StatusBadRequest, http.StatusMethodNotAllowed),
+ apiReturnStatus(&status),
+ )
+ if err != nil {
+ return fmt.Errorf("manifest delete failed: %v", err)
+ }
+ if status == http.StatusBadRequest || status == http.StatusMethodNotAllowed {
+ return fmt.Errorf("registry returned status %d%.0w", status, ErrRegUnsupported)
+ }
+ return nil
+}
+
+func (a *api) ManifestGetReq(registry, repo, ref string, dig digest.Digest, td *testData, opts ...apiDoOpt) error {
+ u, err := url.Parse(registry + "/v2/" + repo + "/manifests/" + ref)
+ if err != nil {
+ return err
+ }
+ err = a.Do(apiWithAnd(opts),
+ apiWithMethod("GET"),
+ apiWithURL(u),
+ apiWithHeaderAdd("Accept", "application/vnd.oci.image.index.v1+json"),
+ apiWithHeaderAdd("Accept", "application/vnd.oci.image.manifest.v1+json"),
+ )
+ if err != nil {
+ return fmt.Errorf("manifest get failed: %v", err)
+ }
+ return nil
+}
+
+func (a *api) ManifestGetExists(registry, repo, ref string, dig digest.Digest, td *testData, opts ...apiDoOpt) error {
+ opts = append(opts,
+ apiExpectStatus(http.StatusOK),
+ )
+ if val, ok := td.manifests[dig]; ok && len(val) > 0 {
+ mediaType := detectMediaType(val)
+ opts = append(opts,
+ apiExpectBody(val),
+ apiExpectHeader("Content-Type", mediaType),
+ apiExpectHeader("Content-Length", fmt.Sprintf("%d", len(val))),
+ )
+ }
+ return a.ManifestGetReq(registry, repo, ref, dig, td, opts...)
+}
+
+func (a *api) ManifestHeadReq(registry, repo, ref string, dig digest.Digest, td *testData, opts ...apiDoOpt) error {
+ u, err := url.Parse(registry + "/v2/" + repo + "/manifests/" + ref)
+ if err != nil {
+ return err
+ }
+ err = a.Do(apiWithAnd(opts),
+ apiWithMethod("HEAD"),
+ apiWithURL(u),
+ apiWithHeaderAdd("Accept", "application/vnd.oci.image.index.v1+json"),
+ apiWithHeaderAdd("Accept", "application/vnd.oci.image.manifest.v1+json"),
+ )
+ if err != nil {
+ return fmt.Errorf("manifest head failed: %v", err)
+ }
+ return nil
+}
+
+func (a *api) ManifestHeadExists(registry, repo, ref string, dig digest.Digest, td *testData, opts ...apiDoOpt) error {
+ opts = append(opts,
+ apiExpectStatus(http.StatusOK),
+ apiExpectBody([]byte{}),
+ )
+ if val, ok := td.manifests[dig]; ok && len(val) > 0 {
+ mediaType := detectMediaType(val)
+ opts = append(opts,
+ apiExpectHeader("Content-Type", mediaType),
+ apiExpectHeader("Content-Length", fmt.Sprintf("%d", len(val))),
+ )
+ }
+ return a.ManifestHeadReq(registry, repo, ref, dig, td, opts...)
+}
+
+func (a *api) ManifestPut(registry, repo, ref string, dig digest.Digest, td *testData, opts ...apiDoOpt) error {
+ flags := a.GetFlags(opts...)
+ bodyBytes, ok := td.manifests[dig]
+ if !ok {
+ return fmt.Errorf("ManifestPut missing expected digest to send: %s%.0w", dig.String(), errTestAPIError)
+ }
+ u, err := url.Parse(registry + "/v2/" + repo + "/manifests/" + ref)
+ if err != nil {
+ return err
+ }
+ mediaType := detectMediaType(bodyBytes)
+ resp := http.Response{}
+ if flags["ExpectBadDigest"] {
+ opts = append(opts,
+ apiExpectStatus(http.StatusBadRequest),
+ )
+ } else {
+ opts = append(opts,
+ apiExpectStatus(http.StatusCreated),
+ apiExpectHeader("Location", ""),
+ )
+ }
+ err = a.Do(apiWithAnd(opts),
+ apiWithMethod("PUT"),
+ apiWithURL(u),
+ apiWithBody(bodyBytes),
+ apiWithHeaderAdd("Content-Type", mediaType),
+ apiReturnResponse(&resp),
+ )
+ if err != nil {
+ return fmt.Errorf("manifest put failed: %v", err)
+ }
+ // do not validate response if a failure was expected
+ if flags["ExpectBadDigest"] {
+ return nil
+ }
+ digHeader := resp.Header.Get("Docker-Content-Digest")
+ if digHeader == "" {
+ return fmt.Errorf("warning: registry does not return a Docker-Content-Digest header")
+ }
+ if digHeader != "" && digHeader != dig.String() {
+ return fmt.Errorf("Docker-Content-Digest header value expected %q, received %q", dig.String(), digHeader)
+ }
+ return nil
+}
+
+func (a *api) ReferrersList(registry, repo string, dig digest.Digest, opts ...apiDoOpt) (image.Index, error) {
+ rl := image.Index{}
+ u, err := url.Parse(registry + "/v2/" + repo + "/referrers/" + dig.String())
+ if err != nil {
+ return rl, err
+ }
+ err = a.Do(apiWithAnd(opts),
+ apiWithURL(u),
+ apiExpectHeader("Content-Type", "application/vnd.oci.image.index.v1+json"),
+ apiExpectStatus(http.StatusOK),
+ apiReturnJSONBody(&rl),
+ )
+ return rl, err
+}
+
+func (a *api) TagList(registry, repo string, opts ...apiDoOpt) (specs.TagList, error) {
+ tl := specs.TagList{}
+ u, err := url.Parse(registry + "/v2/" + repo + "/tags/list")
+ if err != nil {
+ return tl, err
+ }
+ err = a.Do(apiWithAnd(opts),
+ apiWithURL(u),
+ apiWithOr(
+ []apiDoOpt{
+ apiExpectStatus(http.StatusOK),
+ apiReturnJSONBody(&tl),
+ },
+ []apiDoOpt{
+ apiExpectStatus(http.StatusNotFound),
+ },
+ ),
+ )
+ return tl, err
+}
+
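+// apiWithAnd merges a list of options into a single option whose request and
+// response functions run every merged function and join any resulting errors.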
+func apiWithAnd(opts []apiDoOpt) apiDoOpt {
+ ret := apiDoOpt{}
+ reqFns := [](func(*http.Request) error){}
+ respFns := [](func(*http.Response) error){}
+ for _, opt := range opts {
+ if opt.reqFn != nil {
+ reqFns = append(reqFns, opt.reqFn)
+ }
+ if opt.respFn != nil {
+ respFns = append(respFns, opt.respFn)
+ }
+ if opt.out != nil {
+ ret.out = opt.out
+ }
+ }
+ if len(reqFns) == 1 {
+ ret.reqFn = reqFns[0]
+ } else if len(reqFns) > 0 {
+ ret.reqFn = func(r *http.Request) error {
+ errs := []error{}
+ for _, fn := range reqFns {
+ err := fn(r)
+ if err != nil {
+ errs = append(errs, err)
+ }
+ }
+ if len(errs) == 1 {
+ return errs[0]
+ }
+ return errors.Join(errs...)
+ }
+ }
+ if len(respFns) == 1 {
+ ret.respFn = respFns[0]
+ } else if len(respFns) > 0 {
+ ret.respFn = func(r *http.Response) error {
+ errs := []error{}
+ for _, fn := range respFns {
+ err := fn(r)
+ if err != nil {
+ errs = append(errs, err)
+ }
+ }
+ if len(errs) == 1 {
+ return errs[0]
+ }
+ return errors.Join(errs...)
+ }
+ }
+ return ret
+}
+
+// apiWithOr succeeds when all of the respFn entries in any one of the option lists succeed.
+// Note that reqFn entries are ignored.
+func apiWithOr(optLists ...[]apiDoOpt) apiDoOpt {
+ return apiDoOpt{
+ respFn: func(resp *http.Response) error {
+ errsOr := []error{}
+ for _, opts := range optLists {
+ errsResp := []error{}
+ for _, opt := range opts {
+ if opt.respFn != nil {
+ err := opt.respFn(resp)
+ if err != nil {
+ errsResp = append(errsResp, err)
+ }
+ }
+ }
+ if len(errsResp) == 0 {
+ return nil
+ }
+ errsOr = append(errsOr, errors.Join(errsResp...))
+ }
+ return fmt.Errorf("response did not match any condition: %w", errors.Join(errsOr...))
+ },
+ }
+}
+
+func apiWithFlag(flag string) apiDoOpt {
+ return apiDoOpt{
+ flags: map[string]bool{flag: true},
+ }
+}
+
+func apiWithMethod(method string) apiDoOpt {
+ return apiDoOpt{
+ reqFn: func(req *http.Request) error {
+ req.Method = method
+ return nil
+ },
+ }
+}
+
+func apiWithURL(u *url.URL) apiDoOpt {
+ return apiDoOpt{
+ reqFn: func(req *http.Request) error {
+ req.URL = u
+ return nil
+ },
+ }
+}
+
+func apiWithContentLength(l int64) apiDoOpt {
+ return apiDoOpt{
+ reqFn: func(req *http.Request) error {
+ req.ContentLength = l
+ if req.Header == nil {
+ req.Header = http.Header{}
+ }
+ req.Header.Add("Content-Length", fmt.Sprintf("%d", l))
+ return nil
+ },
+ }
+}
+
+func apiWithHeaderAdd(key, value string) apiDoOpt {
+ return apiDoOpt{
+ reqFn: func(req *http.Request) error {
+ if req.Header == nil {
+ req.Header = http.Header{}
+ }
+ req.Header.Add(key, value)
+ return nil
+ },
+ }
+}
+
+func apiWithBody(body []byte) apiDoOpt {
+ return apiDoOpt{
+ reqFn: func(req *http.Request) error {
+ req.Body = io.NopCloser(bytes.NewReader(body))
+ req.GetBody = func() (io.ReadCloser, error) {
+ return io.NopCloser(bytes.NewReader(body)), nil
+ }
+ return nil
+ },
+ }
+}
+
+func apiExpectBody(bodyExpect []byte) apiDoOpt {
+ return apiDoOpt{
+ respFn: func(resp *http.Response) error {
+ // read body and replace with a buf reader
+ bodyReceived, err := io.ReadAll(resp.Body)
+ if err != nil {
+ return fmt.Errorf("failed to read body: %w", err)
+ }
+ resp.Body = io.NopCloser(bytes.NewReader(bodyReceived))
+ if bytes.Equal(bodyExpect, bodyReceived) {
+ return nil
+ }
+ var bufExpect, bufReceived bytes.Buffer
+ err = printBody(bodyReceived, &bufReceived)
+ if err != nil {
+ return fmt.Errorf("failed to print received body: %w", err)
+ }
+ err = printBody(bodyExpect, &bufExpect)
+ if err != nil {
+ return fmt.Errorf("failed to print expected body: %w", err)
+ }
+ return fmt.Errorf("body contents mismatch, expected %s, received %s", strings.TrimSpace(bufExpect.String()), strings.TrimSpace(bufReceived.String()))
+ },
+ }
+}
+
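+// apiExpectHeader verifies a response header. An empty val only requires the
+// header to be present and non-empty; otherwise the value must match exactly.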
+func apiExpectHeader(key, val string) apiDoOpt {
+ return apiDoOpt{
+ respFn: func(resp *http.Response) error {
+ cur := resp.Header.Get(key)
+ if val == "" {
+ if cur == "" {
+ return fmt.Errorf("missing header %q", key)
+ }
+ } else {
+ if cur != val {
+ return fmt.Errorf("header value mismatch for %q, expected %q, received %q", key, val, cur)
+ }
+ }
+ return nil
+ },
+ }
+}
+
+func apiExpectStatus(statusCodes ...int) apiDoOpt {
+ return apiDoOpt{
+ respFn: func(resp *http.Response) error {
+ if slices.Contains(statusCodes, resp.StatusCode) {
+ return nil
+ }
+ return fmt.Errorf("unexpected status code, expected one of %v, received %d", statusCodes, resp.StatusCode)
+ },
+ }
+}
+
+func apiReturnHeader(key string, val *string) apiDoOpt {
+ return apiDoOpt{
+ respFn: func(resp *http.Response) error {
+ cur := resp.Header.Get(key)
+ if cur != "" {
+ *val = cur
+ }
+ return nil
+ },
+ }
+}
+
+func apiReturnJSONBody(data any) apiDoOpt {
+ return apiDoOpt{
+ respFn: func(resp *http.Response) error {
+ return json.NewDecoder(resp.Body).Decode(data)
+ },
+ }
+}
+
+func apiReturnResponse(ret *http.Response) apiDoOpt {
+ return apiDoOpt{
+ respFn: func(r *http.Response) error {
+ *ret = *r
+ return nil
+ },
+ }
+}
+
+func apiReturnStatus(status *int) apiDoOpt {
+ return apiDoOpt{
+ respFn: func(resp *http.Response) error {
+ *status = resp.StatusCode
+ return nil
+ },
+ }
+}
+
+func apiSaveOutput(out io.Writer) apiDoOpt {
+ return apiDoOpt{
+ out: out,
+ }
+}
+
+type authHeader struct {
+ Type string
+ Realm string
+ Service string
+ Scope string
+}
+
+type authInfo struct {
+ Token string `json:"token"`
+ AccessToken string `json:"access_token"`
+}
+
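+// getAuthHeader parses the WWW-Authenticate header from a 401 response and
+// returns an Authorization header value: either basic auth credentials or a
+// bearer token requested from the indicated realm.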
+func (a *api) getAuthHeader(client http.Client, resp *http.Response) (string, error) {
+ header := resp.Header.Get("WWW-Authenticate")
+ if resp.StatusCode != http.StatusUnauthorized || header == "" {
+ return "", fmt.Errorf("status code or header invalid for adding auth, status %d, header %s", resp.StatusCode, header)
+ }
+ parsed, err := parseAuthHeader(header)
+ if err != nil {
+ return "", err
+ }
+ if parsed.Type == "basic" {
+ return fmt.Sprintf("Basic %s", base64.StdEncoding.EncodeToString([]byte(a.user+":"+a.pass))), nil
+ }
+ if parsed.Type == "bearer" {
+ u, err := resp.Request.URL.Parse(parsed.Realm)
+ if err != nil {
+ return "", fmt.Errorf("failed to parse realm url: %w", err)
+ }
+ param := url.Values{}
+ param.Set("service", parsed.Service)
+ if parsed.Scope != "" {
+ param.Set("scope", parsed.Scope)
+ }
+ u.RawQuery = param.Encode()
+ req, err := http.NewRequest(http.MethodGet, u.String(), nil)
+ if err != nil {
+ return "", fmt.Errorf("failed to create request: %w", err)
+ }
+ req.Header.Set("Accept", "application/json")
+ req.SetBasicAuth(a.user, a.pass)
+ authResp, err := client.Do(req)
+ if err != nil {
+ return "", fmt.Errorf("failed to send auth request: %w", err)
+ }
+ defer authResp.Body.Close()
+ if authResp.StatusCode != http.StatusOK {
+ return "", fmt.Errorf("invalid status on auth request: %d", authResp.StatusCode)
+ }
+ ai := authInfo{}
+ if err := json.NewDecoder(authResp.Body).Decode(&ai); err != nil {
+ return "", fmt.Errorf("failed to parse auth response: %w", err)
+ }
+ if ai.AccessToken != "" {
+ ai.Token = ai.AccessToken
+ }
+ return fmt.Sprintf("Bearer %s", ai.Token), nil
+ }
+ return "", fmt.Errorf("failed to parse auth header, type=%s: %s", parsed.Type, header)
+}
+
+var (
+ authHeaderMatcher = regexp.MustCompile("(?i).*(bearer|basic).*")
+ authParamsMatcher = regexp.MustCompile(`([a-zA-Z]+)="(.+?)"`)
+)
+
+func parseAuthHeader(header string) (authHeader, error) {
+ // TODO: replace with a better parser, quotes should be optional, get character set from upstream http rfc
+ var parsed authHeader
+ parsed.Type = strings.ToLower(authHeaderMatcher.ReplaceAllString(header, "$1"))
+ if parsed.Type == "bearer" {
+ matches := authParamsMatcher.FindAllStringSubmatch(header, -1)
+ for _, match := range matches {
+ switch strings.ToLower(match[1]) {
+ case "realm":
+ parsed.Realm = match[2]
+ case "service":
+ parsed.Service = match[2]
+ case "scope":
+ parsed.Scope = match[2]
+ }
+ }
+ }
+ return parsed, nil
+}
+
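+// wrapTransport wraps an http.RoundTripper, writing a formatted copy of each
+// request and response (or transport error) to out when it is set.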
+type wrapTransport struct {
+ out io.Writer
+ orig http.RoundTripper
+}
+
+func (wt *wrapTransport) RoundTrip(req *http.Request) (*http.Response, error) {
+ if wt.out != nil {
+ _ = printRequest(req, wt.out)
+ }
+ resp, err := wt.orig.RoundTrip(req)
+ if wt.out != nil {
+ if err == nil {
+ _ = printResponse(resp, wt.out)
+ }
+ if err != nil {
+ fmt.Fprintf(wt.out, "%s\n~~~ Error ~~~\n%s\n", strings.Repeat("-", 80), err.Error())
+ }
+ fmt.Fprintf(wt.out, "%s\n", strings.Repeat("=", 80))
+ }
+ return resp, err
+}
+
+type detectManifest struct {
+ MediaType string `json:"mediaType"`
+ Subject *image.Descriptor `json:"subject,omitempty"`
+}
+
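+// detectMediaType returns the mediaType field from a manifest body, defaulting
+// to the OCI image manifest media type when the field is not set.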
+func detectMediaType(body []byte) string {
+ det := detectManifest{
+ MediaType: "application/vnd.oci.image.manifest.v1+json",
+ }
+ _ = json.Unmarshal(body, &det)
+ return det.MediaType
+}
+
+func detectSubject(body []byte) *image.Descriptor {
+ det := detectManifest{}
+ _ = json.Unmarshal(body, &det)
+ return det.Subject
+}
+
+func cloneBodyReq(req *http.Request) ([]byte, error) {
+ if req.GetBody != nil {
+ rc, err := req.GetBody()
+ if err != nil {
+ return nil, err
+ }
+ out, err := io.ReadAll(rc)
+ _ = rc.Close()
+ return out, err
+ }
+ if req.Body == nil {
+ return []byte{}, nil
+ }
+ out, err := io.ReadAll(req.Body)
+ if err != nil {
+ return nil, err
+ }
+ _ = req.Body.Close()
+ // replace the body with a buffer so it can be reused
+ req.Body = io.NopCloser(bytes.NewReader(out))
+ return out, err
+}
+
+func cloneBodyResp(resp *http.Response) ([]byte, error) {
+ if resp.Body == nil {
+ return []byte{}, nil
+ }
+ out, err := io.ReadAll(resp.Body)
+ if err != nil {
+ return nil, err
+ }
+ _ = resp.Body.Close()
+ // replace the body with a buffer so it can be reused
+ resp.Body = io.NopCloser(bytes.NewReader(out))
+ return out, err
+}
+
+func mediaTypeBase(orig string) string {
+ base, _, _ := strings.Cut(orig, ";")
+ return strings.TrimSpace(strings.ToLower(base))
+}
+
+func printBody(body []byte, w io.Writer) error {
+ if len(body) == 0 {
+ fmt.Fprintf(w, "--- Empty body ---\n")
+ return nil
+ }
+ ct := http.DetectContentType(body)
+ switch mediaTypeBase(ct) {
+ case "application/json", "text/plain":
+ fmt.Fprintf(w, "%.*s\n", truncateBody, string(body))
+ if len(body) > truncateBody {
+ fmt.Fprintf(w, "--- Truncated body from %d to %d bytes ---\n", len(body), truncateBody)
+ }
+ default:
+ fmt.Fprintf(w, "--- Output of %s not supported, %d bytes not shown ---\n", ct, len(body))
+ }
+ return nil
+}
+
+func printHeaders(headers http.Header, w io.Writer) error {
+ fmt.Fprintf(w, "Headers:\n")
+ for _, k := range slices.Sorted(maps.Keys(headers)) {
+ fmt.Fprintf(w, " %25s: %v\n", k, headers[k])
+ }
+ return nil
+}
+
+func printRequest(req *http.Request, w io.Writer) error {
+ fmt.Fprintf(w, "%s\n~~~ REQUEST ~~~\n", strings.Repeat("=", 80))
+ fmt.Fprintf(w, "Method: %s\nURL: %s\n", req.Method, req.URL.String())
+ printHeaders(req.Header, w)
+ body, err := cloneBodyReq(req)
+ if err != nil {
+ return err
+ }
+ printBody(body, w)
+
+ return nil
+}
+
+func printResponse(resp *http.Response, w io.Writer) error {
+ fmt.Fprintf(w, "%s\n~~~ RESPONSE ~~~\n", strings.Repeat("-", 80))
+ fmt.Fprintf(w, "Status: %d\n", resp.StatusCode)
+ printHeaders(resp.Header, w)
+ body, err := cloneBodyResp(resp)
+ if err != nil {
+ return err
+ }
+ printBody(body, w)
+
+ return nil
+}
+
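+// redactWriter wraps an io.Writer and masks credentials (authorization
+// headers, tokens, and state values) before they are written to the report.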
+type redactWriter struct {
+ w io.Writer
+}
+
+var (
+ redactRegexp = regexp.MustCompile(`(?i)("?\w*(?:authorization|token|state)\w*"?(?:=|:)\s*(?:\[)?\s*"?\s*(?:(?:bearer|basic)? )?)[^\s?&"\]]*`)
+ redactReplace = []byte("$1*****")
+)
+
+func (rw redactWriter) Write(p []byte) (int, error) {
+ pRedact := redactRegexp.ReplaceAll(p, redactReplace)
+ n, err := rw.w.Write(pRedact)
+ if err != nil {
+ return 0, err
+ }
+ if n != len(pRedact) {
+ return 0, io.ErrShortWrite
+ }
+ return len(p), nil
+}
diff --git a/conformance2/config.go b/conformance2/config.go
new file mode 100644
index 00000000..0ad3bbd0
--- /dev/null
+++ b/conformance2/config.go
@@ -0,0 +1,659 @@
+package main
+
+import (
+ "fmt"
+ "io"
+ "os"
+ "reflect"
+ "runtime/debug"
+ "strconv"
+ "strings"
+
+ "github.com/goccy/go-yaml"
+)
+
+const (
+ confGoTag = "conformance"
+ envOCIConf = "OCI"
+ envOCIConfFile = "OCI_CONFIGURATION"
+ envOCIVersion = "OCI_VERSION"
+ defaultOCIConf = "oci-conformance.yaml"
+ chunkMin = 1024
+ truncateBody = 4096
+ biVCSCommit = "vcs.revision"
+)
+
+type config struct {
+ Registry string `conformance:"REGISTRY" yaml:"registry"` // hostname:port of registry server
+ TLS tls `conformance:"TLS" yaml:"tls"` // tls configuration for communicating with the registry
+ Repo1 string `conformance:"REPO1" yaml:"repo1"` // first repository for pushing content
+ Repo2 string `conformance:"REPO2" yaml:"repo2"` // second repository for pushing content
+ LoginUser string `conformance:"USERNAME" yaml:"username"` // username for login, leave blank for anonymous
+ LoginPass string `conformance:"PASSWORD" yaml:"password"` // password for login, leave blank for anonymous
+ LogLevel string `conformance:"LOG" yaml:"logging"` // slog logging level, defaults to "warn"
+ LogWriter io.Writer `yaml:"-"` // writer used for logging, defaults to os.Stderr
+ APIs configAPI `conformance:"API" yaml:"apis"` // API tests to run
+ Data configData `conformance:"DATA" yaml:"data"` // data types to test
+ ROData configROData `conformance:"RO_DATA" yaml:"roData"` // read-only data for registries that do not support push methods
+ ResultsDir string `conformance:"RESULTS_DIR" yaml:"resultsDir"` // directory to write results
+ Version string `conformance:"VERSION" yaml:"version"` // spec version used to set test defaults
+ schemeReg string `yaml:"-"` // base for url to access the registry
+ Commit string `yaml:"commit"` // injected git commit hash from runtime
+ Legacy bool `yaml:"legacy,omitempty"` // injected to indicate that conformance was run with "go test"
+}
+
+type tls int
+
+const (
+ tlsEnabled tls = iota
+ tlsInsecure
+ tlsDisabled
+)
+
+type configAPI struct {
+ Pull bool `conformance:"PULL" yaml:"pull"`
+ Push bool `conformance:"PUSH" yaml:"push"`
+ Blobs configBlobs `conformance:"BLOBS" yaml:"blobs"`
+ Manifests configManifests `conformance:"MANIFESTS" yaml:"manifests"`
+ Tags configTags `conformance:"TAGS" yaml:"tags"`
+ Referrer bool `conformance:"REFERRER" yaml:"referrer"`
+}
+
+type configBlobs struct {
+ Atomic bool `conformance:"ATOMIC" yaml:"atomic"`
+ Delete bool `conformance:"DELETE" yaml:"delete"`
+ MountAnonymous bool `conformance:"MOUNT_ANONYMOUS" yaml:"mountAnonymous"`
+}
+
+type configManifests struct {
+ Atomic bool `conformance:"ATOMIC" yaml:"atomic"`
+ Delete bool `conformance:"DELETE" yaml:"delete"`
+}
+
+type configTags struct {
+ Atomic bool `conformance:"ATOMIC" yaml:"atomic"`
+ Delete bool `conformance:"DELETE" yaml:"delete"`
+ List bool `conformance:"LIST" yaml:"list"`
+}
+
+type configData struct {
+ Image bool `conformance:"IMAGE" yaml:"image"` // standard OCI image
+ Index bool `conformance:"INDEX" yaml:"index"` // multi-platform manifest
+ IndexList bool `conformance:"INDEX_LIST" yaml:"indexList"` // nested index
+ Sparse bool `conformance:"SPARSE" yaml:"sparse"` // TODO: multi-platform manifest with missing entries
+ Artifact bool `conformance:"ARTIFACT" yaml:"artifact"` // OCI artifact
+ Subject bool `conformance:"SUBJECT" yaml:"subject"` // artifact with the subject defined
+ SubjectMissing bool `conformance:"SUBJECT_MISSING" yaml:"subjectMissing"` // artifact with a missing subject
+ ArtifactList bool `conformance:"ARTIFACT_LIST" yaml:"artifactList"` // index of artifacts
+ SubjectList bool `conformance:"SUBJECT_LIST" yaml:"subjectList"` // index with a subject
+ DataField bool `conformance:"DATA_FIELD" yaml:"dataField"` // data field in descriptor
+ Nondistributable bool `conformance:"NONDISTRIBUTABLE" yaml:"nondistributable"` // nondistributable image, deprecated in spec 1.1
+ CustomFields bool `conformance:"CUSTOM_FIELDS" yaml:"customFields"` // fields added beyond the OCI spec
+ EmptyBlob bool `conformance:"EMPTY_BLOB" yaml:"emptyBlob"` // a zero byte blob
+ Sha512 bool `conformance:"SHA512" yaml:"sha512"` // sha512 digest algorithm
+}
+
+type configROData struct {
+ Tags []string `conformance:"TAGS" yaml:"tags"` // tag names
+ Manifests []string `conformance:"MANIFESTS" yaml:"manifests"` // manifest digests
+ Blobs []string `conformance:"BLOBS" yaml:"blobs"` // blob digests
+ Referrers []string `conformance:"REFERRERS" yaml:"referrers"` // referrers subject digests
+}
+
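+// configLoad builds the configuration by starting from the spec-version
+// defaults and then applying, in increasing order of precedence, legacy
+// environment variables, the yaml configuration file, and OCI_* environment
+// variables.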
+func configLoad() (config, error) {
+ // read config from yaml file if available
+ loadFile := ""
+ configFile := []byte{}
+ if filename, ok := os.LookupEnv(envOCIConfFile); ok {
+ loadFile = filename
+ } else if fi, err := os.Stat(defaultOCIConf); err == nil && !fi.IsDir() {
+ loadFile = defaultOCIConf
+ }
+ if loadFile != "" {
+ fh, err := os.Open(loadFile)
+ if err != nil {
+ return config{}, err
+ }
+ configFile, err = io.ReadAll(fh)
+ _ = fh.Close()
+ if err != nil {
+ return config{}, err
+ }
+ }
+ // extract the version from the config file or env variable
+ configVersion := ""
+ if len(configFile) > 0 {
+ verStruct := struct {
+ Version string `yaml:"version"`
+ }{}
+ err := yaml.Unmarshal(configFile, &verStruct)
+ if err != nil {
+ return config{}, err
+ }
+ configVersion = verStruct.Version
+ }
+ configVersionEnv := os.Getenv(envOCIVersion)
+ if configVersionEnv != "" {
+ configVersion = configVersionEnv
+ }
+ // initialize config with default values based on spec version
+ c := config{
+ Registry: "localhost:5000",
+ Repo1: "conformance/repo1",
+ Repo2: "conformance/repo2",
+ LogLevel: "warn",
+ LogWriter: os.Stderr,
+ ResultsDir: "./results",
+ APIs: configAPI{
+ Pull: true,
+ Push: true,
+ Blobs: configBlobs{
+ Atomic: true,
+ Delete: true,
+ MountAnonymous: true,
+ },
+ Manifests: configManifests{
+ Atomic: true,
+ Delete: true,
+ },
+ Tags: configTags{
+ Atomic: true,
+ Delete: true,
+ List: true,
+ },
+ Referrer: true,
+ },
+ Data: configData{
+ Image: true,
+ Index: true,
+ IndexList: true,
+ Sparse: false,
+ Artifact: true,
+ Subject: true,
+ SubjectMissing: true,
+ ArtifactList: true,
+ SubjectList: true,
+ DataField: true,
+ Nondistributable: true,
+ CustomFields: true,
+ EmptyBlob: true,
+ Sha512: true,
+ },
+ }
+ switch configVersion {
+ case "", "1.1":
+ c.Version = "1.1"
+ case "1.0":
+ c.APIs.Blobs.MountAnonymous = false
+ c.APIs.Referrer = false
+ c.Version = "1.0"
+ default:
+ return config{}, fmt.Errorf("unsupported config version %s", configVersion)
+ }
+ // process legacy variables but warn user when they are seen
+ err := confLegacyEnv(&c)
+ if err != nil {
+ return c, err
+ }
+ // read config from yaml file if available
+ if len(configFile) > 0 {
+ err := yaml.Unmarshal(configFile, &c)
+ if err != nil {
+ return c, err
+ }
+ }
+ // parse config from environment variables, overriding any yaml settings
+ err = confFromEnv(envOCIConf, confGoTag, reflect.ValueOf(&c))
+ if err != nil {
+ return c, err
+ }
+ // setup computed values
+ scheme := "https"
+ if c.TLS == tlsDisabled {
+ scheme = "http"
+ }
+ c.schemeReg = fmt.Sprintf("%s://%s", scheme, c.Registry)
+ // load the commit from the build info
+ if bi, ok := debug.ReadBuildInfo(); ok && bi != nil {
+ for _, setting := range bi.Settings {
+ if setting.Key == biVCSCommit {
+ c.Commit = setting.Value
+ break
+ }
+ }
+ }
+ return c, nil
+}
+
+func (t tls) MarshalText() ([]byte, error) {
+ var s string
+ switch t {
+ default:
+ s = "enabled" // by default, TLS is enabled
+ case tlsInsecure:
+ s = "insecure"
+ case tlsDisabled:
+ s = "disabled"
+ }
+ return []byte(s), nil
+}
+
+func (t *tls) UnmarshalText(b []byte) error {
+ switch strings.ToLower(string(b)) {
+ default:
+ *t = tlsEnabled
+ case "insecure":
+ *t = tlsInsecure
+ case "disabled":
+ *t = tlsDisabled
+ }
+ return nil
+}
+
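+// confFromEnv recursively walks a struct, building environment variable names
+// from the given prefix and struct tag, and sets any field whose variable is
+// defined in the environment.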
+func confFromEnv(env, tag string, vp reflect.Value) error {
+ vpt := vp.Type()
+ if vpt.Kind() != reflect.Pointer {
+ return fmt.Errorf("confFromEnv requires a pointer input")
+ }
+ if vp.IsZero() {
+ return nil // nil pointer
+ }
+ v := reflect.Indirect(vp)
+ if v.Kind() == reflect.Pointer {
+ // pointer to a pointer, recurse
+ return confFromEnv(env, tag, v)
+ }
+ if v.Kind() == reflect.Struct {
+ // expand each field, adding to prefix and recursing on pointer to the entry
+ for i := 0; i < v.NumField(); i++ {
+ vtf := v.Type().Field(i)
+ tagVal := vtf.Tag.Get(tag)
+ if tagVal != "" {
+ if !v.Field(i).CanAddr() {
+ return fmt.Errorf("unable to generate address on %s", v.Field(i).Type().Name())
+ }
+ tagEnv := fmt.Sprintf("%s_%s", env, tagVal)
+ err := confFromEnv(tagEnv, tag, v.Field(i).Addr())
+ if err != nil {
+ return fmt.Errorf("field failed \"%s\": %w", v.Field(i).Type().Name(), err)
+ }
+ }
+ }
+ return nil
+ }
+
+ // get the value from the environment
+ val := os.Getenv(env)
+ if val == "" {
+ // skip undefined env variables
+ return nil
+ }
+
+ // try to unmarshal with a built in method
+ if mt, ok := vp.Interface().(interface{ UnmarshalText(b []byte) error }); ok {
+ err := mt.UnmarshalText([]byte(val))
+ if err != nil {
+ return fmt.Errorf("failed to unmarshal \"%s\": %w", env, err)
+ }
+ return nil
+ }
+
+ // fall back to extracting by the kind
+ switch v.Kind() {
+ case reflect.String:
+ v.SetString(val)
+ case reflect.Bool:
+ b, err := strconv.ParseBool(val)
+ if err != nil {
+ return fmt.Errorf("failed to parse bool value from environment %s=%s", env, val)
+ }
+ v.SetBool(b)
+ case reflect.Slice:
+ switch v.Type().Elem().Kind() {
+ case reflect.String:
+ valSlice := strings.Split(val, " ")
+ newSlice := reflect.MakeSlice(v.Type(), len(valSlice), len(valSlice))
+ for i, cur := range valSlice {
+ newSlice.Index(i).SetString(cur)
+ }
+ v.Set(newSlice)
+ default:
+ return fmt.Errorf("unsupported slice of kind: %s", v.Type().Elem().Kind())
+ }
+ default:
+ // unhandled type
+ return fmt.Errorf("unsupported kind: %s", v.Kind())
+ }
+ return nil
+}
+
+func confLegacyEnv(c *config) error {
+ // Note: some legacy variables are not converted:
+ // export OCI_HIDE_SKIPPED_WORKFLOWS=0
+ // export OCI_DELETE_MANIFEST_BEFORE_BLOBS=0
+ if v := os.Getenv("OCI_ROOT_URL"); v != "" {
+ fmt.Fprintf(os.Stderr, "WARNING: OCI_ROOT_URL is deprecated, use OCI_REGISTRY and OCI_TLS instead\n")
+ v := strings.TrimSuffix(strings.TrimSpace(strings.ToLower(v)), "/")
+ vSplit := strings.SplitN(v, "://", 2)
+ scheme := "https"
+ reg := v
+ if len(vSplit) == 2 {
+ scheme = vSplit[0]
+ reg = vSplit[1]
+ }
+ switch scheme {
+ case "http":
+ c.TLS = tlsDisabled
+ default:
+ c.TLS = tlsEnabled
+ }
+ c.Registry = reg
+ }
+ if v := os.Getenv("OCI_NAMESPACE"); v != "" {
+ fmt.Fprintf(os.Stderr, "WARNING: OCI_NAMESPACE is deprecated, use OCI_REPO1 instead\n")
+ c.Repo1 = strings.TrimSuffix(strings.TrimSpace(strings.ToLower(v)), "/")
+ }
+ if v := os.Getenv("OCI_CROSSMOUNT_NAMESPACE"); v != "" {
+ fmt.Fprintf(os.Stderr, "WARNING: OCI_CROSSMOUNT_NAMESPACE is deprecated, use OCI_REPO2 instead\n")
+ c.Repo2 = strings.TrimSuffix(strings.TrimSpace(strings.ToLower(v)), "/")
+ }
+ if v := os.Getenv("OCI_TEST_PULL"); v != "" {
+ fmt.Fprintf(os.Stderr, "WARNING: OCI_TEST_PULL is deprecated, use OCI_API_PULL instead\n")
+ b, err := strconv.ParseBool(v)
+ if err != nil {
+ return fmt.Errorf("failed to parse bool value from environment %s=%s", "OCI_TEST_PULL", v)
+ }
+ c.APIs.Pull = b
+ }
+ if v := os.Getenv("OCI_TEST_PUSH"); v != "" {
+ fmt.Fprintf(os.Stderr, "WARNING: OCI_TEST_PUSH is deprecated, use OCI_API_PUSH instead\n")
+ b, err := strconv.ParseBool(v)
+ if err != nil {
+ return fmt.Errorf("failed to parse bool value from environment %s=%s", "OCI_TEST_PUSH", v)
+ }
+ c.APIs.Push = b
+ }
+ if v := os.Getenv("OCI_TEST_CONTENT_DISCOVERY"); v != "" {
+ fmt.Fprintf(os.Stderr, "WARNING: OCI_TEST_CONTENT_DISCOVERY is deprecated, use OCI_API_TAGS_LIST and OCI_API_REFERRER instead\n")
+ b, err := strconv.ParseBool(v)
+ if err != nil {
+ return fmt.Errorf("failed to parse bool value from environment %s=%s", "OCI_TEST_CONTENT_DISCOVERY", v)
+ }
+ c.APIs.Tags.List = b
+ c.APIs.Referrer = b
+ }
+ if v := os.Getenv("OCI_TEST_CONTENT_MANAGEMENT"); v != "" {
+ fmt.Fprintf(os.Stderr, "WARNING: OCI_TEST_CONTENT_MANAGEMENT is deprecated, use OCI_API_TAGS_DELETE, OCI_API_MANIFESTS_DELETE, and OCI_API_BLOBS_DELETE instead\n")
+ b, err := strconv.ParseBool(v)
+ if err != nil {
+ return fmt.Errorf("failed to parse bool value from environment %s=%s", "OCI_TEST_CONTENT_MANAGEMENT", v)
+ }
+ c.APIs.Tags.Delete = b
+ c.APIs.Manifests.Delete = b
+ c.APIs.Blobs.Delete = b
+ }
+ if v := os.Getenv("OCI_DEBUG"); v != "" {
+ fmt.Fprintf(os.Stderr, "WARNING: OCI_DEBUG is deprecated, use OCI_LOG=debug instead\n")
+ b, err := strconv.ParseBool(v)
+ if err != nil {
+ return fmt.Errorf("failed to parse bool value from environment %s=%s", "OCI_DEBUG", v)
+ }
+ if b {
+ c.LogLevel = "debug"
+ }
+ }
+ if v := os.Getenv("OCI_TAG_NAME"); v != "" {
+ fmt.Fprintf(os.Stderr, "WARNING: OCI_TAG_NAME is deprecated, use OCI_RO_DATA_TAGS instead\n")
+ c.ROData.Tags = append(c.ROData.Tags, strings.Split(v, " ")...)
+ }
+ if v := os.Getenv("OCI_MANIFEST_DIGEST"); v != "" {
+ fmt.Fprintf(os.Stderr, "WARNING: OCI_MANIFEST_DIGEST is deprecated, use OCI_RO_DATA_MANIFESTS instead\n")
+ c.ROData.Manifests = append(c.ROData.Manifests, strings.Split(v, " ")...)
+ }
+ if v := os.Getenv("OCI_BLOB_DIGEST"); v != "" {
+ fmt.Fprintf(os.Stderr, "WARNING: OCI_BLOB_DIGEST is deprecated, use OCI_RO_DATA_BLOBS instead\n")
+ c.ROData.Blobs = append(c.ROData.Blobs, strings.Split(v, " ")...)
+ }
+ return nil
+}
+
+func (c config) Report() string {
+ // censor credentials
+ if c.LoginUser != "" {
+ c.LoginUser = "***"
+ }
+ if c.LoginPass != "" {
+ c.LoginPass = "***"
+ }
+ b, err := yaml.Marshal(c)
+ if err != nil {
+ return fmt.Sprintf("failed to marshal config: %v", err)
+ }
+ return string(b)
+}
+
+var confHTMLTemplates = map[string]string{
+ "report": `
+
+ OCI Distribution Conformance Tests
+
+
+
+ OCI Distribution Conformance Tests
+ {{- if .Config.Legacy }}
+ WARNING: Running conformance with "go test" is deprecated, please update to "go build"
+ {{- end }}
+ {{ template "summary" . }}
+
+ {{ template "results" .Results }}
+
+
+`,
+ "summary": `
+
+
+
+ | Summary |
+
+
+ {{- if gt .NumPassed 0 -}}
+
+ {{- if .AllPassed -}}All {{ end -}}{{ .NumPassed }} passed
+ {{- end -}}
+ {{- if gt .NumFailed 0 -}}
+
+ {{- if .AllFailed -}}All {{ end -}}{{ .NumFailed }} failed
+ {{- end -}}
+ {{- if gt .NumSkipped 0 -}}
+
+ {{- if .AllSkipped -}}All {{ end -}}{{ .NumSkipped }} skipped
+ {{- end -}}
+
+
+ |
+
+
+ | Start Time |
+ {{ .StartTimeString }} |
+
+
+ | End Time |
+ {{ .EndTimeString }} |
+
+
+ | Time Elapsed |
+ {{ .RunTime }} |
+
+
+ | Tested Spec |
+ {{ .Config.Version }} |
+
+
+ | Conformance Commit |
+ {{ .Config.Commit }} |
+
+
+ | Configuration |
+ {{ .Config.Report }} |
+
+
+`,
+ "results": `
+
+
+ {{ .Name }}
+ {{- if ne .Output.String "" }}
+ Output:
+ {{- html .Output.String -}}
+ {{- end }}
+ {{- if ne ( len .Errs ) 0 }}
+ Errors:
+ {{- range $err := .Errs }}
+ {{ html $err.Error }}
+ {{- end }}
+ {{- end }}
+ {{- range $result := .Children }}
+ {{template "results" $result }}
+ {{- end }}
+
+
+ `,
+ "status-color": `
+ {{- if eq .String "Pass" }}green
+ {{- else if eq .String "FAIL" }}red
+ {{- else if eq .String "Error" }}red
+ {{- else if eq .String "Skip" }}grey
+ {{- else if eq .String "Disabled" }}grey
+ {{- end }}`,
+}
diff --git a/conformance2/errors.go b/conformance2/errors.go
new file mode 100644
index 00000000..f7dd4a7f
--- /dev/null
+++ b/conformance2/errors.go
@@ -0,0 +1,8 @@
+package main
+
+import "errors"
+
+var (
+ ErrDisabled = errors.New("test is disabled")
+ ErrRegUnsupported = errors.New("registry does not support the requested API")
+)
diff --git a/conformance2/go.mod b/conformance2/go.mod
new file mode 100644
index 00000000..f322831b
--- /dev/null
+++ b/conformance2/go.mod
@@ -0,0 +1,10 @@
+module github.com/opencontainers/distribution-spec/conformance2
+
+go 1.24.0
+
+require (
+ github.com/goccy/go-yaml v1.18.0
+ github.com/opencontainers/distribution-spec/specs-go v0.0.0-20240926185104-8376368dd8aa
+ github.com/opencontainers/go-digest v1.0.0
+ github.com/opencontainers/image-spec v1.1.1
+)
diff --git a/conformance2/go.sum b/conformance2/go.sum
new file mode 100644
index 00000000..696b89f8
--- /dev/null
+++ b/conformance2/go.sum
@@ -0,0 +1,8 @@
+github.com/goccy/go-yaml v1.18.0 h1:8W7wMFS12Pcas7KU+VVkaiCng+kG8QiFeFwzFb+rwuw=
+github.com/goccy/go-yaml v1.18.0/go.mod h1:XBurs7gK8ATbW4ZPGKgcbrY1Br56PdM69F7LkFRi1kA=
+github.com/opencontainers/distribution-spec/specs-go v0.0.0-20240926185104-8376368dd8aa h1:CUcrPKAP0lYp3xAHghAYBR6rFI+BaW0nWAvhHwjLQYM=
+github.com/opencontainers/distribution-spec/specs-go v0.0.0-20240926185104-8376368dd8aa/go.mod h1:Va0IMqkjv62YSEytL4sgxrkiD9IzU0T0bX/ZZEtMnSQ=
+github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
+github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
+github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040=
+github.com/opencontainers/image-spec v1.1.1/go.mod h1:qpqAh3Dmcf36wStyyWU+kCeDgrGnAve2nCC8+7h8Q0M=
diff --git a/conformance2/junit.go b/conformance2/junit.go
new file mode 100644
index 00000000..de385b07
--- /dev/null
+++ b/conformance2/junit.go
@@ -0,0 +1,61 @@
+package main
+
+import "encoding/xml"
+
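+// The types below model the JUnit XML report format (testsuites > testsuite > testcase),
+// so the results can be consumed by tools that understand JUnit reports.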
+const (
+ junitPassed = "passed" // successful test
+ junitSkipped = "skipped" // test intentionally skipped
+ junitFailure = "failure" // test ran but failed, e.g. missed assertion
+ junitError = "error" // test encountered an unexpected error
+)
+
+type junitProperty struct {
+ Name string `xml:"name,attr"` // name or key
+ Value string `xml:"value,attr"` // value of name
+}
+
+type junitResult struct {
+ Message string `xml:"message,attr"`
+ Type string `xml:"type,attr,omitempty"`
+ Data string `xml:",cdata"`
+}
+
+type junitTest struct {
+ Name string `xml:"name,attr"` // name of the test
+ Classname string `xml:"classname,attr"` // hierarchy of the test
+ Time string `xml:"time,attr,omitempty"` // duration in seconds
+ Status string `xml:"status,attr,omitempty"` // passed, skipped, failure, or error
+ Skipped *junitResult `xml:"skipped,omitempty"` // result from skipped tests
+ Failure *junitResult `xml:"failure,omitempty"` // result from test failures
+ Error *junitResult `xml:"error,omitempty"` // result from test errors
+ SystemOut string `xml:"system-out,omitempty"` // output written to stdout
+ SystemErr string `xml:"system-err,omitempty"` // output written to stderr
+}
+
+type junitTestSuite struct {
+ Name string `xml:"name,attr"` // name of suite
+ Package string `xml:"package,attr,omitempty"` // hierarchy of suite
+ Tests int `xml:"tests,attr"` // count of tests
+ Failures int `xml:"failures,attr"` // count of failures
+ Errors int `xml:"errors,attr"` // count of errors
+ Disabled int `xml:"disabled,attr,omitempty"` // count of disabled tests
+ Skipped int `xml:"skipped,attr,omitempty"` // count of skipped tests
+ Time string `xml:"time,attr"` // duration in seconds
+ Timestamp string `xml:"timestamp,attr,omitempty"` // ISO8601
+ Properties []junitProperty `xml:"properties>property,omitempty"` // mapping of key/value pairs associated with the test
+ Testcases []junitTest `xml:"testcase,omitempty"` // slice of tests
+ SystemOut string `xml:"system-out,omitempty"` // output written to stdout
+ SystemErr string `xml:"system-err,omitempty"` // output written to stderr
+}
+
+type junitTestSuites struct {
+ XMLName xml.Name `xml:"testsuites"` // xml namespace and name
+ Name string `xml:"name,attr,omitempty"` // name of the collection of suites
+ Time string `xml:"time,attr,omitempty"` // duration in seconds
+ Tests int `xml:"tests,attr,omitempty"` // count of tests
+ Errors int `xml:"errors,attr,omitempty"` // count of errors
+ Failures int `xml:"failures,attr,omitempty"` // count of failures
+ Skipped int `xml:"skipped,attr,omitempty"` // count of skipped tests
+ Disabled int `xml:"disabled,attr,omitempty"` // count of disabled tests
+ Suites []junitTestSuite `xml:"testsuite,omitempty"` // slice of suites
+}
diff --git a/conformance2/legacy_test.go b/conformance2/legacy_test.go
new file mode 100644
index 00000000..ca610e99
--- /dev/null
+++ b/conformance2/legacy_test.go
@@ -0,0 +1,9 @@
+//go:build legacy || !unit_tests
+
+package main
+
+import "testing"
+
+func TestLegacy(t *testing.T) {
+ mainRun(true)
+}
diff --git a/conformance2/main.go b/conformance2/main.go
new file mode 100644
index 00000000..607d9b51
--- /dev/null
+++ b/conformance2/main.go
@@ -0,0 +1,71 @@
+package main
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+)
+
+func main() {
+ mainRun(false)
+}
+
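+// mainRun loads the configuration, runs all tests, prints a summary to stdout, and writes
+// config.yaml, junit.xml, and report.html to the results directory.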
+func mainRun(legacy bool) {
+ // load config
+ c, err := configLoad()
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "failed to load config: %v\n", err)
+ return
+ }
+ c.Legacy = legacy
+ // run all tests
+ r, err := runnerNew(c)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "failed to setup test: %v\n", err)
+ return
+ }
+ _ = r.TestAll()
+ // show results
+ r.Report(os.Stdout)
+ // generate reports
+ if err := os.MkdirAll(c.ResultsDir, 0755); err != nil {
+ fmt.Fprintf(os.Stderr, "failed to create results directory %s: %v\n", c.ResultsDir, err)
+ return
+ }
+ // write config.yaml
+ fh, err := os.Create(filepath.Join(c.ResultsDir, "config.yaml"))
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "failed to create config.yaml: %v\n", err)
+ return
+ }
+ _, err = fh.Write([]byte(r.Config.Report()))
+ _ = fh.Close()
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "failed to generate config.yaml: %v\n", err)
+ return
+ }
+ // write junit.xml report
+ fh, err = os.Create(filepath.Join(c.ResultsDir, "junit.xml"))
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "failed to create junit.xml: %v\n", err)
+ return
+ }
+ err = r.ReportJunit(fh)
+ _ = fh.Close()
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "failed to generate junit.xml: %v\n", err)
+ return
+ }
+ // write report.html
+ fh, err = os.Create(filepath.Join(c.ResultsDir, "report.html"))
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "failed to create report.html: %v\n", err)
+ return
+ }
+ err = r.ReportHTML(fh)
+ _ = fh.Close()
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "failed to generate report.html: %v\n", err)
+ return
+ }
+ if c.Legacy {
+ fmt.Fprintf(os.Stderr, "WARNING: \"go test\" is deprecated. Please update to using \"go build\".\n")
+ }
+}
diff --git a/conformance2/results.go b/conformance2/results.go
new file mode 100644
index 00000000..9a92083e
--- /dev/null
+++ b/conformance2/results.go
@@ -0,0 +1,161 @@
+package main
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "strings"
+ "time"
+)
+
+type results struct {
+ Name string // name of current runner step, concatenated onto the parent's name
+ Children []*results
+ Parent *results
+ Status status
+ Errs []error
+ Output *bytes.Buffer
+ Start time.Time
+ Stop time.Time
+ Counts [statusMax]int
+}
+
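+// resultsNew creates a results node under parent, prefixing the name with the parent's name
+// and recording the start time.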
+func resultsNew(name string, parent *results) *results {
+ fullName := name
+ if parent != nil && parent.Name != "" {
+ fullName = fmt.Sprintf("%s/%s", parent.Name, name)
+ }
+ return &results{
+ Name: fullName,
+ Parent: parent,
+ Output: &bytes.Buffer{},
+ Start: time.Now(),
+ }
+}
+
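+// Count returns the number of results recorded with the named status, or -1 if the name is
+// not a known status.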
+func (r *results) Count(s string) int {
+ st := statusUnknown
+ err := st.UnmarshalText([]byte(s))
+ if err != nil || st < 0 || st >= statusMax {
+ return -1
+ }
+ return r.Counts[st]
+}
+
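+// ReportWalkErr writes the results tree with one indented line per node, including the errors
+// recorded on leaf nodes.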
+func (r *results) ReportWalkErr(w io.Writer, prefix string) {
+ fmt.Fprintf(w, "%s%s: %s\n", prefix, r.Name, r.Status)
+ if len(r.Children) == 0 && len(r.Errs) > 0 {
+ // show errors from leaf nodes
+ for _, err := range r.Errs {
+ fmt.Fprintf(w, "%s - %s\n", prefix, err.Error())
+ }
+ }
+ if len(r.Children) > 0 {
+ for _, child := range r.Children {
+ child.ReportWalkErr(w, prefix+" ")
+ }
+ }
+}
+
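+// ToJunitTestCases flattens the results tree into JUnit test cases, emitting one case per leaf node.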
+func (r *results) ToJunitTestCases() []junitTest {
+ jTests := []junitTest{}
+ if len(r.Children) == 0 {
+ // return the test case for a leaf node
+ jTest := junitTest{
+ Name: r.Name,
+ Time: fmt.Sprintf("%f", r.Stop.Sub(r.Start).Seconds()),
+ SystemErr: r.Output.String(),
+ Status: r.Status.ToJunit(),
+ }
+ if len(r.Errs) > 0 {
+ jTest.SystemOut = fmt.Sprintf("%v", errors.Join(r.Errs...))
+ }
+ jTests = append(jTests, jTest)
+ }
+ if len(r.Children) > 0 {
+ // recursively collect test cases from child nodes
+ for _, child := range r.Children {
+ jTests = append(jTests, child.ToJunitTestCases()...)
+ }
+ }
+ return jTests
+}
+
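+// status is the outcome of a test; values are ordered so that Set only ever upgrades to a
+// more significant outcome.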
+type status int
+
+const (
+ statusUnknown status = iota // status is undefined
+ statusDisabled // test was disabled by configuration
+ statusSkip // test was skipped
+ statusPass // test passed
+ statusFail // test detected a conformance failure
+ statusError // failure of the test engine itself
+ statusMax // only used for allocating arrays
+)
+
+func (s status) Set(set status) status {
+ // only set status to a higher level
+ if set > s {
+ return set
+ }
+ return s
+}
+
+func (s status) String() string {
+ switch s {
+ case statusPass:
+ return "Pass"
+ case statusSkip:
+ return "Skip"
+ case statusDisabled:
+ return "Disabled"
+ case statusFail:
+ return "FAIL"
+ case statusError:
+ return "Error"
+ default:
+ return "Unknown"
+ }
+}
+
+func (s status) MarshalText() ([]byte, error) {
+ ret := s.String()
+ if ret == "Unknown" {
+ return []byte(ret), fmt.Errorf("unknown status %d", s)
+ }
+ return []byte(ret), nil
+}
+
+func (s *status) UnmarshalText(text []byte) error {
+ switch strings.ToLower(string(text)) {
+ case "pass":
+ *s = statusPass
+ case "skip":
+ *s = statusSkip
+ case "disabled":
+ *s = statusDisabled
+ case "fail":
+ *s = statusFail
+ case "error":
+ *s = statusError
+ case "unknown":
+ *s = statusUnknown
+ default:
+ return fmt.Errorf("unknown status %s", string(text))
+ }
+ return nil
+}
+
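+// ToJunit maps a status to the corresponding JUnit result string: pass to passed, skip and
+// disabled to skipped, fail to failure, and anything else to error.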
+func (s status) ToJunit() string {
+ switch s {
+ case statusPass:
+ return junitPassed
+ case statusSkip, statusDisabled:
+ return junitSkipped
+ case statusFail:
+ return junitFailure
+ default:
+ return junitError
+ }
+}
diff --git a/conformance2/run.go b/conformance2/run.go
new file mode 100644
index 00000000..07f2f3c1
--- /dev/null
+++ b/conformance2/run.go
@@ -0,0 +1,1684 @@
+package main
+
+import (
+ "crypto/rand"
+ "encoding/xml"
+ "errors"
+ "fmt"
+ "html/template"
+ "io"
+ "log/slog"
+ "math"
+ "net/http"
+ "os"
+ "slices"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+
+ digest "github.com/opencontainers/go-digest"
+ image "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+const (
+ testName = "OCI Conformance Test"
+)
+
+var (
+ errTestAPIError = errors.New("API test encountered an internal error")
+ errTestAPIFail = errors.New("API test failed")
+ errTestAPISkip = errors.New("API test was skipped")
+ dataTests = []string{}
+ dataFailManifestTests = []struct {
+ tdName string
+ opts []apiDoOpt
+ }{}
+)
+
+type runner struct {
+ Config config
+ API *api
+ State *state
+ Results *results
+ Log *slog.Logger
+}
+
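+// runnerNew builds a runner from the config, setting up the logger, the API client (with
+// authentication when credentials are provided), the state, and the root of the results tree.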
+func runnerNew(c config) (*runner, error) {
+ lvl := slog.LevelWarn
+ if c.LogLevel != "" {
+ err := lvl.UnmarshalText([]byte(c.LogLevel))
+ if err != nil {
+ return nil, fmt.Errorf("failed to parse logging level %s: %w", c.LogLevel, err)
+ }
+ }
+ if c.LogWriter == nil {
+ c.LogWriter = os.Stderr
+ }
+ apiOpts := []apiOpt{}
+ if c.LoginUser != "" && c.LoginPass != "" {
+ apiOpts = append(apiOpts, apiWithAuth(c.LoginUser, c.LoginPass))
+ }
+ r := runner{
+ Config: c,
+ API: apiNew(http.DefaultClient, apiOpts...),
+ State: stateNew(),
+ Results: resultsNew(testName, nil),
+ Log: slog.New(slog.NewTextHandler(c.LogWriter, &slog.HandlerOptions{Level: lvl})),
+ }
+ return &r, nil
+}
+
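+// GenerateData populates the state with the OCI content enabled by the configuration:
+// read-only references, images, indexes, artifacts, referrers, and intentionally invalid data
+// such as a manifest whose bytes no longer match its digest.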
+func (r *runner) GenerateData() error {
+ var tdName string
+ if !r.Config.Data.Image {
+ // all data tests require the image manifest
+ return nil
+ }
+ // include empty tests for user-provided read-only data; the response content is not validated since it is not known in advance
+ if len(r.Config.ROData.Tags) > 0 || len(r.Config.ROData.Manifests) > 0 || len(r.Config.ROData.Blobs) > 0 || len(r.Config.ROData.Referrers) > 0 {
+ tdName = "read-only"
+ r.State.Data[tdName] = newTestData("Read Only Inputs")
+ r.State.DataStatus[tdName] = statusUnknown
+ dataTests = append(dataTests, tdName)
+ for _, tag := range r.Config.ROData.Tags {
+ r.State.Data[tdName].tags[tag] = ""
+ }
+ for _, manifest := range r.Config.ROData.Manifests {
+ dig, err := digest.Parse(manifest)
+ if err != nil {
+ return fmt.Errorf("failed to parse manifest digest %s: %w", manifest, err)
+ }
+ r.State.Data[tdName].manifests[dig] = []byte{}
+ r.State.Data[tdName].manOrder = append(r.State.Data[tdName].manOrder, dig)
+ }
+ for _, blob := range r.Config.ROData.Blobs {
+ dig, err := digest.Parse(blob)
+ if err != nil {
+ return fmt.Errorf("failed to parse blob digest %s: %w", blob, err)
+ }
+ r.State.Data[tdName].blobs[dig] = []byte{}
+ }
+ for _, subject := range r.Config.ROData.Referrers {
+ dig, err := digest.Parse(subject)
+ if err != nil {
+ return fmt.Errorf("failed to parse subject digest %s: %w", subject, err)
+ }
+ r.State.Data[tdName].referrers[dig] = []*image.Descriptor{}
+ }
+ }
+ if !r.Config.APIs.Push {
+ // do not generate random data if push is disabled
+ return nil
+ }
+ // standard image with a layer per blob test
+ tdName = "image"
+ r.State.Data[tdName] = newTestData("Image")
+ r.State.DataStatus[tdName] = statusUnknown
+ dataTests = append(dataTests, tdName)
+ _, err := r.State.Data[tdName].genManifestFull(
+ genWithTag("image"),
+ )
+ if err != nil {
+ return fmt.Errorf("failed to generate test data: %w", err)
+ }
+ tdName = "image-uncompressed"
+ r.State.Data[tdName] = newTestData("Image Uncompressed")
+ r.State.DataStatus[tdName] = statusUnknown
+ dataTests = append(dataTests, tdName)
+ _, err = r.State.Data[tdName].genManifestFull(
+ genWithTag("image-uncompressed"),
+ genWithCompress(genCompUncomp),
+ )
+ if err != nil {
+ return fmt.Errorf("failed to generate test data: %w", err)
+ }
+ // multi-platform index
+ if r.Config.Data.Index {
+ tdName = "index"
+ r.State.Data[tdName] = newTestData("Index")
+ r.State.DataStatus[tdName] = statusUnknown
+ dataTests = append(dataTests, tdName)
+ _, err = r.State.Data[tdName].genIndexFull(
+ genWithTag("index"),
+ genWithPlatforms([]*image.Platform{
+ {OS: "linux", Architecture: "amd64"},
+ {OS: "linux", Architecture: "arm64"},
+ }),
+ )
+ if err != nil {
+ return fmt.Errorf("failed to generate test data: %w", err)
+ }
+ }
+ // index containing an index
+ if r.Config.Data.Index && r.Config.Data.IndexList {
+ tdName = "nested-index"
+ r.State.Data[tdName] = newTestData("Nested Index")
+ r.State.DataStatus[tdName] = statusUnknown
+ dataTests = append(dataTests, tdName)
+ dig1, err := r.State.Data[tdName].genIndexFull(
+ genWithPlatforms([]*image.Platform{
+ {OS: "linux", Architecture: "amd64"},
+ {OS: "linux", Architecture: "arm64"},
+ }),
+ )
+ if err != nil {
+ return fmt.Errorf("failed to generate test data: %w", err)
+ }
+ dig2, err := r.State.Data[tdName].genIndexFull(
+ genWithPlatforms([]*image.Platform{
+ {OS: "linux", Architecture: "amd64"},
+ {OS: "linux", Architecture: "arm64"},
+ }),
+ )
+ if err != nil {
+ return fmt.Errorf("failed to generate test data: %w", err)
+ }
+ _, _, err = r.State.Data[tdName].genIndex([]*image.Platform{nil, nil}, []digest.Digest{dig1, dig2},
+ genWithTag("index-of-index"),
+ )
+ if err != nil {
+ return fmt.Errorf("failed to generate test data: %w", err)
+ }
+ }
+ // artifact manifest
+ if r.Config.Data.Artifact {
+ tdName = "artifact"
+ r.State.Data[tdName] = newTestData("Artifact")
+ r.State.DataStatus[tdName] = statusUnknown
+ dataTests = append(dataTests, tdName)
+ _, err = r.State.Data[tdName].genManifestFull(
+ genWithTag("artifact"),
+ genWithArtifactType("application/vnd.example.oci.conformance"),
+ genWithConfigMediaType("application/vnd.oci.empty.v1+json"),
+ genWithConfigBytes([]byte("{}")),
+ genWithLayerCount(1),
+ genWithLayerMediaType("application/vnd.example.oci.conformance"),
+ )
+ if err != nil {
+ return fmt.Errorf("failed to generate test data: %w", err)
+ }
+ }
+ // artifact index
+ if r.Config.Data.ArtifactList {
+ tdName = "artifact-index"
+ r.State.Data[tdName] = newTestData("Artifact Index")
+ r.State.DataStatus[tdName] = statusUnknown
+ dataTests = append(dataTests, tdName)
+ _, err = r.State.Data[tdName].genIndexFull(
+ genWithTag("artifact-index"),
+ genWithPlatforms([]*image.Platform{
+ {OS: "linux", Architecture: "amd64"},
+ {OS: "linux", Architecture: "arm64"},
+ }),
+ genWithArtifactType("application/vnd.example.oci.conformance"),
+ genWithConfigMediaType("application/vnd.oci.empty.v1+json"),
+ genWithConfigBytes([]byte("{}")),
+ genWithLayerCount(1),
+ genWithLayerMediaType("application/vnd.example.oci.conformance"),
+ )
+ if err != nil {
+ return fmt.Errorf("failed to generate test data: %w", err)
+ }
+ }
+ // image and two referrers
+ if r.Config.Data.Subject {
+ tdName = "artifacts-with-subject"
+ r.State.Data[tdName] = newTestData("Artifacts with Subject")
+ r.State.DataStatus[tdName] = statusUnknown
+ dataTests = append(dataTests, tdName)
+ subjDig, err := r.State.Data[tdName].genManifestFull(
+ genWithTag("image"),
+ )
+ if err != nil {
+ return fmt.Errorf("failed to generate test data: %w", err)
+ }
+ subjDesc := *r.State.Data[tdName].desc[subjDig]
+ _, err = r.State.Data[tdName].genManifestFull(
+ genWithSubject(subjDesc),
+ )
+ if err != nil {
+ return fmt.Errorf("failed to generate test data: %w", err)
+ }
+ _, err = r.State.Data[tdName].genManifestFull(
+ genWithArtifactType("application/vnd.example.oci.conformance"),
+ genWithAnnotations(map[string]string{
+ "org.opencontainers.conformance": "hello conformance test",
+ }),
+ genWithAnnotationUniq(),
+ genWithConfigMediaType("application/vnd.oci.empty.v1+json"),
+ genWithConfigBytes([]byte("{}")),
+ genWithLayerCount(1),
+ genWithLayerMediaType("application/vnd.example.oci.conformance"),
+ genWithSubject(subjDesc),
+ genWithTag("tagged-artifact"),
+ )
+ if err != nil {
+ return fmt.Errorf("failed to generate test data: %w", err)
+ }
+ }
+ // index and artifact-index with a subject
+ if r.Config.Data.SubjectList {
+ tdName = "index-with-subject"
+ r.State.Data[tdName] = newTestData("Index with Subject")
+ r.State.DataStatus[tdName] = statusUnknown
+ dataTests = append(dataTests, tdName)
+ subjDig, err := r.State.Data[tdName].genIndexFull(
+ genWithTag("index"),
+ )
+ if err != nil {
+ return fmt.Errorf("failed to generate test data: %w", err)
+ }
+ subjDesc := *r.State.Data[tdName].desc[subjDig]
+ _, err = r.State.Data[tdName].genIndexFull(
+ genWithArtifactType("application/vnd.example.oci.conformance"),
+ genWithAnnotations(map[string]string{
+ "org.opencontainers.conformance": "hello conformance test",
+ }),
+ genWithAnnotationUniq(),
+ genWithConfigMediaType("application/vnd.oci.empty.v1+json"),
+ genWithConfigBytes([]byte("{}")),
+ genWithLayerCount(1),
+ genWithLayerMediaType("application/vnd.example.oci.conformance"),
+ genWithSubject(subjDesc),
+ genWithTag("tagged-artifact"),
+ )
+ if err != nil {
+ return fmt.Errorf("failed to generate test data: %w", err)
+ }
+ }
+ // artifact with missing subject
+ if r.Config.Data.SubjectMissing {
+ tdName = "missing-subject"
+ r.State.Data[tdName] = newTestData("Missing Subject")
+ r.State.DataStatus[tdName] = statusUnknown
+ dataTests = append(dataTests, tdName)
+ subjDesc := image.Descriptor{
+ MediaType: "application/vnd.oci.image.manifest.v1+json",
+ Size: 123,
+ Digest: digest.FromString("missing content"),
+ }
+ _, err = r.State.Data[tdName].genManifestFull(
+ genWithArtifactType("application/vnd.example.oci.conformance"),
+ genWithAnnotations(map[string]string{
+ "org.opencontainers.conformance": "hello conformance test",
+ }),
+ genWithAnnotationUniq(),
+ genWithConfigMediaType("application/vnd.oci.empty.v1+json"),
+ genWithConfigBytes([]byte("{}")),
+ genWithLayerCount(1),
+ genWithLayerMediaType("application/vnd.example.oci.conformance"),
+ genWithSubject(subjDesc),
+ genWithTag("tagged-artifact"),
+ )
+ if err != nil {
+ return fmt.Errorf("failed to generate test data: %w", err)
+ }
+ }
+ // data field in descriptor
+ if r.Config.Data.DataField {
+ tdName = "data-field"
+ r.State.Data[tdName] = newTestData("Data Field")
+ r.State.DataStatus[tdName] = statusUnknown
+ dataTests = append(dataTests, tdName)
+ _, err := r.State.Data[tdName].genManifestFull(
+ genWithTag("data-field"),
+ genWithDescriptorData(),
+ )
+ if err != nil {
+ return fmt.Errorf("failed to generate test data: %w", err)
+ }
+ }
+ // image with non-distributable layers
+ if r.Config.Data.Nondistributable {
+ tdName = "non-distributable-layers"
+ r.State.Data[tdName] = newTestData("Non-distributable Layers")
+ r.State.DataStatus[tdName] = statusUnknown
+ dataTests = append(dataTests, tdName)
+
+ b := make([]byte, 256)
+ layers := make([]image.Descriptor, 3)
+ confDig := make([]digest.Digest, 3)
+ // first layer is compressed + non-distributable
+ _, err := rand.Read(b)
+ if err != nil {
+ return fmt.Errorf("failed to generate test data: %w", err)
+ }
+ confDig[0] = digest.Canonical.FromBytes(b)
+ _, err = rand.Read(b)
+ if err != nil {
+ return fmt.Errorf("failed to generate test data: %w", err)
+ }
+ dig := digest.Canonical.FromBytes(b)
+ layers[0] = image.Descriptor{
+ MediaType: "application/vnd.oci.image.layer.nondistributable.v1.tar+gzip",
+ Digest: dig,
+ Size: 123456,
+ URLs: []string{"https://store.example.com/blobs/sha256/" + dig.Encoded()},
+ }
+ // second layer is uncompressed + non-distributable
+ _, err = rand.Read(b)
+ if err != nil {
+ return fmt.Errorf("failed to generate test data: %w", err)
+ }
+ dig = digest.Canonical.FromBytes(b)
+ confDig[1] = dig
+ layers[1] = image.Descriptor{
+ MediaType: "application/vnd.oci.image.layer.nondistributable.v1.tar",
+ Digest: dig,
+ Size: 12345,
+ URLs: []string{"https://store.example.com/blobs/sha256/" + dig.Encoded()},
+ }
+ // third layer is normal
+ cDig, ucDig, _, err := r.State.Data[tdName].genLayer(1)
+ if err != nil {
+ return fmt.Errorf("failed to generate test data: %w", err)
+ }
+ confDig[2] = ucDig
+ layers[2] = *r.State.Data[tdName].desc[cDig]
+ // generate the config
+ cDig, _, err = r.State.Data[tdName].genConfig(image.Platform{OS: "linux", Architecture: "amd64"}, confDig)
+ if err != nil {
+ return fmt.Errorf("failed to generate test data: %w", err)
+ }
+ // generate the manifest
+ _, _, err = r.State.Data[tdName].genManifest(*r.State.Data[tdName].desc[cDig], layers,
+ genWithTag("non-distributable-image"),
+ )
+ if err != nil {
+ return fmt.Errorf("failed to generate test data: %w", err)
+ }
+ }
+ // add a randomized unknown field to manifests and config
+ if r.Config.Data.CustomFields {
+ tdName = "custom-fields"
+ r.State.Data[tdName] = newTestData("Custom Fields")
+ r.State.DataStatus[tdName] = statusUnknown
+ dataTests = append(dataTests, tdName)
+ _, err = r.State.Data[tdName].genIndexFull(
+ genWithTag("custom-fields"),
+ genWithPlatforms([]*image.Platform{
+ {OS: "linux", Architecture: "amd64"},
+ {OS: "linux", Architecture: "arm64"},
+ }),
+ genWithExtraField(),
+ )
+ if err != nil {
+ return fmt.Errorf("failed to generate test data: %w", err)
+ }
+ }
+ tdName = "bad-digest-image"
+ r.State.Data[tdName] = newTestData("Bad Digest Image")
+ r.State.DataStatus[tdName] = statusUnknown
+ dataFailManifestTests = append(dataFailManifestTests, struct {
+ tdName string
+ opts []apiDoOpt
+ }{tdName: tdName, opts: []apiDoOpt{apiWithFlag("ExpectBadDigest")}})
+ dig, err := r.State.Data[tdName].genManifestFull()
+ if err != nil {
+ return fmt.Errorf("failed to generate test data: %w", err)
+ }
+ // append whitespace so the stored bytes no longer match the manifest digest
+ r.State.Data[tdName].manifests[dig] = append(r.State.Data[tdName].manifests[dig], []byte(" ")...)
+
+ // TODO: sha512 digest
+
+ return nil
+}
+
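+// Report writes a plain-text summary: the results tree with errors, overall status counts,
+// per-API and per-data conformance, and the credential-censored configuration.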
+func (r *runner) Report(w io.Writer) {
+ fmt.Fprintf(w, "Test results\n")
+ r.Results.ReportWalkErr(w, "")
+ fmt.Fprintf(w, "\n")
+
+ fmt.Fprintf(w, "OCI Conformance Result: %s\n", r.Results.Status.String())
+ padWidth := 30
+
+ statusTotal := 0
+ for i := status(1); i < statusMax; i++ {
+ pad := ""
+ if len(i.String()) < padWidth {
+ pad = strings.Repeat(".", padWidth-len(i.String()))
+ }
+ fmt.Fprintf(w, " %s%s: %10d\n", i.String(), pad, r.Results.Counts[i])
+ statusTotal += r.Results.Counts[i]
+ }
+ pad := strings.Repeat(".", padWidth-len("Total"))
+ fmt.Fprintf(w, " %s%s: %10d\n\n", "Total", pad, statusTotal)
+
+ if len(r.Results.Errs) > 0 {
+ fmt.Fprintf(w, "Errors:\n%s\n\n", errors.Join(r.Results.Errs...))
+ }
+
+ fmt.Fprintf(w, "API conformance:\n")
+ for i := range stateAPIMax {
+ pad := ""
+ if len(i.String()) < padWidth {
+ pad = strings.Repeat(".", padWidth-len(i.String()))
+ }
+ fmt.Fprintf(w, " %s%s: %10s\n", i.String(), pad, r.State.APIStatus[i].String())
+ }
+ fmt.Fprintf(w, "\n")
+
+ fmt.Fprintf(w, "Data conformance:\n")
+ tdNames := []string{}
+ for tdName := range r.State.Data {
+ tdNames = append(tdNames, tdName)
+ }
+ sort.Strings(tdNames)
+ for _, tdName := range tdNames {
+ pad := ""
+ if len(r.State.Data[tdName].name) < padWidth {
+ pad = strings.Repeat(".", padWidth-len(r.State.Data[tdName].name))
+ }
+ fmt.Fprintf(w, " %s%s: %10s\n", r.State.Data[tdName].name, pad, r.State.DataStatus[tdName].String())
+ }
+ fmt.Fprintf(w, "\n")
+
+ fmt.Fprintf(w, "Configuration:\n")
+ fmt.Fprintf(w, " %s", strings.ReplaceAll(r.Config.Report(), "\n", "\n "))
+ fmt.Fprintf(w, "\n")
+}
+
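+// ReportJunit encodes the collected results as an indented JUnit XML document.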
+func (r *runner) ReportJunit(w io.Writer) error {
+ ju := r.toJunit()
+ enc := xml.NewEncoder(w)
+ enc.Indent("", " ")
+ return enc.Encode(ju)
+}
+
+func (r *runner) toJunit() *junitTestSuites {
+ statusTotal := 0
+ for i := status(1); i < statusMax; i++ {
+ statusTotal += r.Results.Counts[i]
+ }
+ tSec := fmt.Sprintf("%f", r.Results.Stop.Sub(r.Results.Start).Seconds())
+ jTSuites := junitTestSuites{
+ Tests: statusTotal,
+ Errors: r.Results.Counts[statusError],
+ Failures: r.Results.Counts[statusFail],
+ Skipped: r.Results.Counts[statusSkip],
+ Disabled: r.Results.Counts[statusDisabled],
+ Time: tSec,
+ }
+ jTSuite := junitTestSuite{
+ Name: r.Results.Name,
+ Tests: statusTotal,
+ Errors: r.Results.Counts[statusError],
+ Failures: r.Results.Counts[statusFail],
+ Skipped: r.Results.Counts[statusSkip],
+ Disabled: r.Results.Counts[statusDisabled],
+ Time: tSec,
+ Testcases: r.Results.ToJunitTestCases(),
+ }
+ jTSuite.Properties = []junitProperty{{Name: "Config", Value: r.Config.Report()}}
+ jTSuites.Suites = []junitTestSuite{jTSuite}
+ return &jTSuites
+}
+
+type reportData struct {
+ Config config
+ Results *results
+ NumTotal int
+ NumPassed int
+ NumFailed int
+ NumSkipped int
+ PercentPassed int
+ PercentFailed int
+ PercentSkipped int
+ StartTimeString string
+ EndTimeString string
+ RunTime string
+ AllPassed bool
+ AllFailed bool
+ AllSkipped bool
+ Version string
+}
+
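+// ReportHTML computes summary statistics and renders the confHTMLTemplates into a standalone HTML report.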
+func (r *runner) ReportHTML(w io.Writer) error {
+ data := reportData{
+ Config: r.Config,
+ Results: r.Results,
+ NumTotal: r.Results.Counts[statusPass] + r.Results.Counts[statusFail] + r.Results.Counts[statusError] + r.Results.Counts[statusSkip] + r.Results.Counts[statusDisabled],
+ NumPassed: r.Results.Counts[statusPass],
+ NumFailed: r.Results.Counts[statusFail] + r.Results.Counts[statusError],
+ NumSkipped: r.Results.Counts[statusSkip] + r.Results.Counts[statusDisabled],
+ StartTimeString: r.Results.Start.Format("Jan 2 15:04:05.000 -0700 MST"),
+ EndTimeString: r.Results.Stop.Format("Jan 2 15:04:05.000 -0700 MST"),
+ RunTime: r.Results.Stop.Sub(r.Results.Start).String(),
+ }
+ if data.NumTotal > 0 {
+ data.PercentPassed = int(math.Round(float64(data.NumPassed) / float64(data.NumTotal) * 100))
+ data.PercentFailed = int(math.Round(float64(data.NumFailed) / float64(data.NumTotal) * 100))
+ data.PercentSkipped = int(math.Round(float64(data.NumSkipped) / float64(data.NumTotal) * 100))
+ }
+ data.AllPassed = data.NumPassed == data.NumTotal
+ data.AllFailed = data.NumFailed == data.NumTotal
+ data.AllSkipped = data.NumSkipped == data.NumTotal
+ data.Version = r.Config.Version
+ // load all templates
+ t := template.New("report")
+ for name, value := range confHTMLTemplates {
+ tAdd, err := template.New(name).Parse(value)
+ if err != nil {
+ return fmt.Errorf("cannot parse report template %s: %v", name, err)
+ }
+ t, err = t.AddParseTree(name, tAdd.Tree)
+ if err != nil {
+ return fmt.Errorf("cannot add report template %s to tree: %v", name, err)
+ }
+ }
+ // execute the top level report template
+ return t.ExecuteTemplate(w, "report", data)
+}
+
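+// TestAll generates the configured test data and runs every test group: the empty repository
+// checks, the blob APIs for each digest algorithm, push/list/head/pull/referrers/delete for each
+// generated dataset, and the manifest pushes that are expected to fail. Start and stop times are
+// recorded on the results tree.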
+func (r *runner) TestAll() error {
+ errs := []error{}
+ r.Results.Start = time.Now()
+ repo := r.Config.Repo1
+ repo2 := r.Config.Repo2
+
+ err := r.GenerateData()
+ if err != nil {
+ return fmt.Errorf("aborting tests, unable to generate data: %w", err)
+ }
+
+ err = r.TestEmpty(r.Results, repo)
+ if err != nil {
+ errs = append(errs, err)
+ }
+
+ algos := []digest.Algorithm{digest.SHA256}
+ if r.Config.Data.Sha512 {
+ algos = append(algos, digest.SHA512)
+ }
+ for _, algo := range algos {
+ err = r.TestBlobAPIs(r.Results, "blobs-"+algo.String(), "Blobs "+algo.String(), algo, repo, repo2)
+ if err != nil {
+ errs = append(errs, err)
+ }
+ }
+
+ // loop over different types of data
+ for _, tdName := range dataTests {
+ err = r.ChildRun(tdName, r.Results, func(r *runner, res *results) error {
+ errs := []error{}
+ // push
+ err := r.TestPush(res, tdName, repo)
+ if err != nil {
+ errs = append(errs, err)
+ }
+ // list, pull, and query
+ err = r.TestList(res, tdName, repo)
+ if err != nil {
+ errs = append(errs, err)
+ }
+ err = r.TestHead(res, tdName, repo)
+ if err != nil {
+ errs = append(errs, err)
+ }
+ err = r.TestPull(res, tdName, repo)
+ if err != nil {
+ errs = append(errs, err)
+ }
+ err = r.TestReferrers(res, tdName, repo)
+ if err != nil {
+ errs = append(errs, err)
+ }
+ // delete
+ err = r.TestDelete(res, tdName, repo)
+ if err != nil {
+ errs = append(errs, err)
+ }
+ return errors.Join(errs...)
+ })
+ if err != nil {
+ errs = append(errs, err)
+ }
+ }
+
+ // other tests with expected failures to push the manifest
+ for _, failTest := range dataFailManifestTests {
+ tdName := failTest.tdName
+ err = r.ChildRun(tdName, r.Results, func(r *runner, res *results) error {
+ errs := []error{}
+ for dig := range r.State.Data[tdName].blobs {
+ err := r.TestPushBlobAny(res, tdName, repo, dig)
+ if err != nil {
+ errs = append(errs, fmt.Errorf("failed to push blob %s%.0w", dig.String(), err))
+ }
+ }
+ for i, dig := range r.State.Data[tdName].manOrder {
+ err := r.TestPushManifestDigest(res, tdName, repo, dig, failTest.opts...)
+ if err != nil {
+ errs = append(errs, fmt.Errorf("failed to push manifest %d, digest %s%.0w", i, dig.String(), err))
+ }
+ }
+ if len(errs) > 0 {
+ return errors.Join(errs...)
+ }
+ return nil
+ })
+ if err != nil {
+ errs = append(errs, err)
+ }
+ }
+
+ r.Results.Stop = time.Now()
+
+ if len(errs) > 0 {
+ return errors.Join(errs...)
+ }
+ return nil
+}
+
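+// TestBlobAPIs exercises each blob push method for the given digest algorithm (post only,
+// post+put, chunked, streamed, and the mount variations), plus well-known blob contents and
+// pushes that are expected to be rejected for a bad digest.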
+func (r *runner) TestBlobAPIs(parent *results, tdName, tdDesc string, algo digest.Algorithm, repo, repo2 string) error {
+ return r.ChildRun(algo.String()+" blobs", parent, func(r *runner, res *results) error {
+ if err := r.APIRequire(stateAPIBlobPush); err != nil {
+ r.TestSkip(res, err, tdName, stateAPIBlobPush)
+ return fmt.Errorf("%.0w%w", errTestAPISkip, err)
+ }
+ errs := []error{}
+ r.State.Data[tdName] = newTestData(tdDesc)
+ r.State.DataStatus[tdName] = statusUnknown
+ digests := map[string]digest.Digest{}
+ // test the various blob push APIs
+ if _, ok := blobAPIsTestedByAlgo[algo]; !ok {
+ blobAPIsTestedByAlgo[algo] = &[stateAPIMax]bool{}
+ }
+ blobAPITests := []string{"post only", "post+put", "chunked single", "stream", "mount", "mount anonymous", "mount missing"}
+ for _, name := range blobAPITests {
+ dig, _, err := r.State.Data[tdName].genBlob(genWithBlobSize(512), genWithAlgo(algo))
+ if err != nil {
+ return fmt.Errorf("failed to generate blob: %w", err)
+ }
+ digests[name] = dig
+ }
+ blobAPITests = append(blobAPITests, "chunked multi", "chunked multi and put chunk")
+ minChunkSize := int64(chunkMin)
+ minHeader := ""
+ for _, testName := range blobAPITests {
+ err := r.ChildRun(testName, res, func(r *runner, res *results) error {
+ var err error
+ errs := []error{}
+ dig := digests[testName]
+ var api stateAPIType
+ switch testName {
+ case "post only":
+ api = stateAPIBlobPostOnly
+ err = r.TestPushBlobPostOnly(res, tdName, repo, dig)
+ if err != nil {
+ errs = append(errs, err)
+ }
+ case "post+put":
+ api = stateAPIBlobPostPut
+ err = r.TestPushBlobPostPut(res, tdName, repo, dig)
+ if err != nil {
+ errs = append(errs, err)
+ }
+ case "chunked single":
+ api = stateAPIBlobPatchChunked
+ // extract the min chunk length from a chunked push with a single chunk
+ err = r.TestPushBlobPatchChunked(res, tdName, repo, dig, apiReturnHeader("OCI-Chunk-Min-Length", &minHeader))
+ if err != nil {
+ errs = append(errs, err)
+ }
+ if minHeader != "" {
+ minParse, err := strconv.Atoi(minHeader)
+ if err == nil && int64(minParse) > minChunkSize {
+ minChunkSize = int64(minParse)
+ }
+ }
+ case "chunked multi":
+ api = stateAPIBlobPatchChunked
+ // generate a blob large enough to span three chunks
+ dig, _, err = r.State.Data[tdName].genBlob(genWithBlobSize(minChunkSize*3-5), genWithAlgo(algo))
+ if err != nil {
+ return fmt.Errorf("failed to generate chunked blob of size %d: %w", minChunkSize*3-5, err)
+ }
+ digests[testName] = dig
+ err = r.TestPushBlobPatchChunked(res, tdName, repo, dig)
+ if err != nil {
+ errs = append(errs, err)
+ }
+ case "chunked multi and put chunk":
+ api = stateAPIBlobPatchChunked
+ // generate a blob large enough to span three chunks
+ dig, _, err = r.State.Data[tdName].genBlob(genWithBlobSize(minChunkSize*3-5), genWithAlgo(algo))
+ if err != nil {
+ return fmt.Errorf("failed to generate chunked blob of size %d: %w", minChunkSize*3-5, err)
+ }
+ digests[testName] = dig
+ err = r.TestPushBlobPatchChunked(res, tdName, repo, dig, apiWithFlag("PutLastChunk"))
+ if err != nil {
+ errs = append(errs, err)
+ }
+ case "stream":
+ api = stateAPIBlobPatchStream
+ err = r.TestPushBlobPatchStream(res, tdName, repo, dig)
+ if err != nil {
+ errs = append(errs, err)
+ }
+ case "mount":
+ api = stateAPIBlobMountSource
+ // first push to repo2
+ err = r.TestPushBlobAny(res, tdName, repo2, dig)
+ if err != nil {
+ errs = append(errs, err)
+ }
+ // then mount repo2 to repo
+ err = r.TestPushBlobMount(res, tdName, repo, repo2, dig)
+ if err != nil {
+ errs = append(errs, err)
+ }
+ case "mount anonymous":
+ api = stateAPIBlobMountAnonymous
+ // first push to repo2
+ err = r.TestPushBlobAny(res, tdName, repo2, dig)
+ if err != nil {
+ errs = append(errs, err)
+ }
+ // then mount repo2 to repo
+ err = r.TestPushBlobMountAnonymous(res, tdName, repo, dig)
+ if err != nil {
+ errs = append(errs, err)
+ }
+ case "mount missing":
+ // mount repo2 to repo without first pushing there
+ err = r.TestPushBlobMountMissing(res, tdName, repo, repo2, dig)
+ if err != nil {
+ errs = append(errs, err)
+ }
+ default:
+ return fmt.Errorf("unknown api test %s", testName)
+ }
+ // track the used APIs so TestPushBlobAny doesn't rerun tests
+ blobAPIsTested[api] = true
+ blobAPIsTestedByAlgo[dig.Algorithm()][api] = true
+ if err == nil {
+ // head request
+ err = r.TestHeadBlob(res, tdName, repo, dig)
+ if err != nil {
+ errs = append(errs, err)
+ }
+ // pull each blob
+ err = r.TestPullBlob(res, tdName, repo, dig)
+ if err != nil {
+ errs = append(errs, err)
+ }
+ }
+ // cleanup
+ err = r.TestDeleteBlob(res, tdName, repo, dig)
+ if err != nil {
+ errs = append(errs, err)
+ }
+ if testName == "mount" || testName == "mount anonymous" {
+ err = r.TestDeleteBlob(res, tdName, repo2, dig)
+ if err != nil {
+ errs = append(errs, err)
+ }
+ }
+ return errors.Join(errs...)
+ })
+ if err != nil {
+ errs = append(errs, err)
+ }
+ }
+ // test various well known blob contents
+ blobDataTests := map[string][]byte{}
+ if r.Config.Data.EmptyBlob {
+ blobDataTests["empty"] = []byte("")
+ }
+ blobDataTests["emptyJSON"] = []byte("{}")
+ for name, val := range blobDataTests {
+ dig := algo.FromBytes(val)
+ digests[name] = dig
+ r.State.Data[tdName].blobs[dig] = val
+ }
+ for name := range blobDataTests {
+ err := r.ChildRun(name, res, func(r *runner, res *results) error {
+ // use an error list local to this child so its result does not include failures from other tests
+ errs := []error{}
+ dig := digests[name]
+ err := r.TestPushBlobAny(res, tdName, repo, dig)
+ if err != nil {
+ errs = append(errs, err)
+ }
+ err = r.TestHeadBlob(res, tdName, repo, dig)
+ if err != nil {
+ errs = append(errs, err)
+ }
+ err = r.TestPullBlob(res, tdName, repo, dig)
+ if err != nil {
+ errs = append(errs, err)
+ }
+ err = r.TestDeleteBlob(res, tdName, repo, dig)
+ if err != nil {
+ errs = append(errs, err)
+ }
+ return errors.Join(errs...)
+ })
+ if err != nil {
+ errs = append(errs, err)
+ }
+ }
+ // test the various blob push APIs with a bad digest
+ blobAPIBadDigTests := []string{"bad digest post only", "bad digest post+put", "bad digest chunked", "bad digest chunked and put chunk", "bad digest stream"}
+ for _, name := range blobAPIBadDigTests {
+ dig, _, err := r.State.Data[tdName].genBlob(genWithBlobSize(minChunkSize*3-5), genWithAlgo(algo))
+ if err != nil {
+ return fmt.Errorf("failed to generate blob: %w", err)
+ }
+ // corrupt the blob bytes
+ r.State.Data[tdName].blobs[dig] = append(r.State.Data[tdName].blobs[dig], []byte("oh no")...)
+ digests[name] = dig
+ }
+ optBadDig := apiWithFlag("ExpectBadDigest")
+ for _, testName := range blobAPIBadDigTests {
+ err := r.ChildRun(testName, res, func(r *runner, res *results) error {
+ dig := digests[testName]
+ switch testName {
+ case "bad digest post only":
+ return r.TestPushBlobPostOnly(res, tdName, repo, dig, optBadDig)
+ case "bad digest post+put":
+ return r.TestPushBlobPostPut(res, tdName, repo, dig, optBadDig)
+ case "bad digest chunked":
+ return r.TestPushBlobPatchChunked(res, tdName, repo, dig, optBadDig)
+ case "bad digest chunked and put chunk":
+ return r.TestPushBlobPatchChunked(res, tdName, repo, dig, optBadDig)
+ case "bad digest stream":
+ return r.TestPushBlobPatchStream(res, tdName, repo, dig, optBadDig)
+ default:
+ return fmt.Errorf("unknown api test %s", testName)
+ }
+ })
+ if err != nil {
+ errs = append(errs, err)
+ }
+ }
+
+ return errors.Join(errs...)
+ })
+}
+
+func (r *runner) TestDelete(parent *results, tdName string, repo string) error {
+ return r.ChildRun("delete", parent, func(r *runner, res *results) error {
+ errs := []error{}
+ delOrder := slices.Clone(r.State.Data[tdName].manOrder)
+ slices.Reverse(delOrder)
+ for tag, dig := range r.State.Data[tdName].tags {
+ err := r.TestDeleteTag(res, tdName, repo, tag, dig)
+ if err != nil {
+ errs = append(errs, fmt.Errorf("failed to delete manifest tag %s%.0w", tag, err))
+ }
+ }
+ for i, dig := range delOrder {
+ err := r.TestDeleteManifest(res, tdName, repo, dig)
+ if err != nil {
+ errs = append(errs, fmt.Errorf("failed to delete manifest %d, digest %s%.0w", i, dig.String(), err))
+ }
+ }
+ for dig := range r.State.Data[tdName].blobs {
+ err := r.TestDeleteBlob(res, tdName, repo, dig)
+ if err != nil {
+ errs = append(errs, fmt.Errorf("failed to delete blob %s%.0w", dig.String(), err))
+ }
+ }
+ if len(errs) > 0 {
+ return errors.Join(errs...)
+ }
+ return nil
+ })
+}
+
+func (r *runner) TestDeleteTag(parent *results, tdName string, repo string, tag string, dig digest.Digest) error {
+ td := r.State.Data[tdName]
+ return r.ChildRun("tag-delete", parent, func(r *runner, res *results) error {
+ if err := r.APIRequire(stateAPITagDelete); err != nil {
+ r.State.DataStatus[tdName] = r.State.DataStatus[tdName].Set(statusSkip)
+ r.TestSkip(res, err, tdName, stateAPITagDelete, stateAPITagDeleteAtomic)
+ return fmt.Errorf("%.0w%w", errTestAPISkip, err)
+ }
+ if err := r.API.ManifestDelete(r.Config.schemeReg, repo, tag, dig, td, apiSaveOutput(res.Output)); err != nil {
+ r.TestFail(res, err, tdName, stateAPITagDelete)
+ r.TestSkip(res, err, tdName, stateAPITagDeleteAtomic)
+ return fmt.Errorf("%.0w%w", errTestAPIFail, err)
+ }
+ r.TestPass(res, tdName, stateAPITagDelete)
+ // verify tag delete finished immediately
+ if err := r.APIRequire(stateAPITagDeleteAtomic); err != nil {
+ r.TestSkip(res, err, tdName, stateAPITagDeleteAtomic)
+ return fmt.Errorf("%.0w%w", errTestAPISkip, err)
+ }
+ if err := r.API.ManifestHeadReq(r.Config.schemeReg, repo, tag, dig, td, apiSaveOutput(res.Output), apiExpectStatus(http.StatusNotFound)); err != nil {
+ r.TestFail(res, err, tdName, stateAPITagDeleteAtomic)
+ return fmt.Errorf("%.0w%w", errTestAPIFail, err)
+ }
+ r.TestPass(res, tdName, stateAPITagDeleteAtomic)
+ return nil
+ })
+}
+
+func (r *runner) TestDeleteManifest(parent *results, tdName string, repo string, dig digest.Digest) error {
+ td := r.State.Data[tdName]
+ return r.ChildRun("manifest-delete", parent, func(r *runner, res *results) error {
+ if err := r.APIRequire(stateAPIManifestDelete); err != nil {
+ r.State.DataStatus[tdName] = r.State.DataStatus[tdName].Set(statusSkip)
+ r.TestSkip(res, err, tdName, stateAPIManifestDelete, stateAPIManifestDeleteAtomic)
+ return fmt.Errorf("%.0w%w", errTestAPISkip, err)
+ }
+ if err := r.API.ManifestDelete(r.Config.schemeReg, repo, dig.String(), dig, td, apiSaveOutput(res.Output)); err != nil {
+ r.TestFail(res, err, tdName, stateAPIManifestDelete)
+ r.TestSkip(res, err, tdName, stateAPIManifestDeleteAtomic)
+ return fmt.Errorf("%.0w%w", errTestAPIFail, err)
+ }
+ r.TestPass(res, tdName, stateAPIManifestDelete)
+ // verify manifest delete finished immediately
+ if err := r.APIRequire(stateAPIManifestDeleteAtomic); err != nil {
+ r.TestSkip(res, err, tdName, stateAPIManifestDeleteAtomic)
+ return fmt.Errorf("%.0w%w", errTestAPISkip, err)
+ }
+ if err := r.API.ManifestHeadReq(r.Config.schemeReg, repo, dig.String(), dig, td, apiSaveOutput(res.Output), apiExpectStatus(http.StatusNotFound)); err != nil {
+ r.TestFail(res, err, tdName, stateAPIManifestDeleteAtomic)
+ return fmt.Errorf("%.0w%w", errTestAPIFail, err)
+ }
+ r.TestPass(res, tdName, stateAPIManifestDeleteAtomic)
+ return nil
+ })
+}
+
+func (r *runner) TestDeleteBlob(parent *results, tdName string, repo string, dig digest.Digest) error {
+ return r.ChildRun("blob-delete", parent, func(r *runner, res *results) error {
+ td := r.State.Data[tdName]
+ if err := r.APIRequire(stateAPIBlobDelete); err != nil {
+ r.TestSkip(res, err, tdName, stateAPIBlobDelete, stateAPIBlobDeleteAtomic)
+ return fmt.Errorf("%.0w%w", errTestAPISkip, err)
+ }
+ if err := r.API.BlobDelete(r.Config.schemeReg, repo, dig, td, apiSaveOutput(res.Output)); err != nil {
+ r.TestFail(res, err, tdName, stateAPIBlobDelete)
+ r.TestSkip(res, err, tdName, stateAPIBlobDeleteAtomic)
+ return fmt.Errorf("%.0w%w", errTestAPIFail, err)
+ }
+ r.TestPass(res, tdName, stateAPIBlobDelete)
+ // verify blob delete finished immediately
+ if err := r.APIRequire(stateAPIBlobDeleteAtomic); err != nil {
+ r.TestSkip(res, err, tdName, stateAPIBlobDeleteAtomic)
+ return fmt.Errorf("%.0w%w", errTestAPISkip, err)
+ }
+ if err := r.API.BlobHeadReq(r.Config.schemeReg, repo, dig, td, apiSaveOutput(res.Output), apiExpectStatus(http.StatusNotFound)); err != nil {
+ r.TestFail(res, err, tdName, stateAPIBlobDeleteAtomic)
+ return fmt.Errorf("%.0w%w", errTestAPIFail, err)
+ }
+ r.TestPass(res, tdName, stateAPIBlobDeleteAtomic)
+ return nil
+ })
+}
+
+func (r *runner) TestEmpty(parent *results, repo string) error {
+ return r.ChildRun("empty", parent, func(r *runner, res *results) error {
+ errs := []error{}
+ if err := r.TestEmptyTagList(res, repo); err != nil {
+ errs = append(errs, err)
+ }
+ // TODO: test referrers response on unknown digest
+ if len(errs) > 0 {
+ return errors.Join(errs...)
+ }
+ return nil
+ })
+}
+
+func (r *runner) TestEmptyTagList(parent *results, repo string) error {
+ return r.ChildRun("tag list", parent, func(r *runner, res *results) error {
+ if err := r.APIRequire(stateAPITagList); err != nil {
+ r.TestSkip(res, err, "", stateAPITagList)
+ return fmt.Errorf("%.0w%w", errTestAPISkip, err)
+ }
+ if _, err := r.API.TagList(r.Config.schemeReg, repo, apiSaveOutput(res.Output)); err != nil {
+ r.TestFail(res, err, "", stateAPITagList)
+ return fmt.Errorf("%.0w%w", errTestAPIFail, err)
+ }
+ r.TestPass(res, "", stateAPITagList)
+ return nil
+ })
+}
+
+func (r *runner) TestHead(parent *results, tdName string, repo string) error {
+ return r.ChildRun("head", parent, func(r *runner, res *results) error {
+ errs := []error{}
+ for tag, dig := range r.State.Data[tdName].tags {
+ err := r.TestHeadManifestTag(res, tdName, repo, tag, dig)
+ if err != nil {
+ errs = append(errs, fmt.Errorf("failed to send head request for manifest by tag %s%.0w", tag, err))
+ }
+ }
+ for i, dig := range r.State.Data[tdName].manOrder {
+ err := r.TestHeadManifestDigest(res, tdName, repo, dig)
+ if err != nil {
+ errs = append(errs, fmt.Errorf("failed to send head request for manifest %d, digest %s%.0w", i, dig.String(), err))
+ }
+ }
+ for dig := range r.State.Data[tdName].blobs {
+ err := r.TestHeadBlob(res, tdName, repo, dig)
+ if err != nil {
+ errs = append(errs, fmt.Errorf("failed to send head request for blob %s%.0w", dig.String(), err))
+ }
+ }
+ if len(errs) > 0 {
+ return errors.Join(errs...)
+ }
+ return nil
+ })
+}
+
+func (r *runner) TestHeadBlob(parent *results, tdName string, repo string, dig digest.Digest) error {
+ return r.ChildRun("blob-head", parent, func(r *runner, res *results) error {
+ if err := r.APIRequire(stateAPIBlobHead); err != nil {
+ r.TestSkip(res, err, tdName, stateAPIBlobHead)
+ return fmt.Errorf("%.0w%w", errTestAPISkip, err)
+ }
+ if err := r.API.BlobHeadExists(r.Config.schemeReg, repo, dig, r.State.Data[tdName], apiSaveOutput(res.Output)); err != nil {
+ r.TestFail(res, err, tdName, stateAPIBlobHead)
+ return fmt.Errorf("%.0w%w", errTestAPIFail, err)
+ }
+ r.TestPass(res, tdName, stateAPIBlobHead)
+ return nil
+ })
+}
+
+func (r *runner) TestHeadManifestDigest(parent *results, tdName string, repo string, dig digest.Digest) error {
+ td := r.State.Data[tdName]
+ opts := []apiDoOpt{}
+ apis := []stateAPIType{}
+ return r.ChildRun("manifest-head-by-digest", parent, func(r *runner, res *results) error {
+ if err := r.APIRequire(stateAPIManifestHeadDigest); err != nil {
+ r.State.DataStatus[tdName] = r.State.DataStatus[tdName].Set(statusSkip)
+ r.TestSkip(res, err, tdName, stateAPIManifestHeadDigest)
+ return fmt.Errorf("%.0w%w", errTestAPISkip, err)
+ }
+ apis = append(apis, stateAPIManifestHeadDigest)
+ opts = append(opts, apiSaveOutput(res.Output))
+ if err := r.API.ManifestHeadExists(r.Config.schemeReg, repo, dig.String(), dig, td, opts...); err != nil {
+ r.TestFail(res, err, tdName, apis...)
+ return fmt.Errorf("%.0w%w", errTestAPIFail, err)
+ }
+ r.TestPass(res, tdName, apis...)
+ return nil
+ })
+}
+
+func (r *runner) TestHeadManifestTag(parent *results, tdName string, repo string, tag string, dig digest.Digest) error {
+ td := r.State.Data[tdName]
+ opts := []apiDoOpt{}
+ apis := []stateAPIType{}
+ return r.ChildRun("manifest-head-by-tag", parent, func(r *runner, res *results) error {
+ if err := r.APIRequire(stateAPIManifestHeadTag); err != nil {
+ r.State.DataStatus[tdName] = r.State.DataStatus[tdName].Set(statusSkip)
+ r.TestSkip(res, err, tdName, stateAPIManifestHeadTag)
+ return fmt.Errorf("%.0w%w", errTestAPISkip, err)
+ }
+ apis = append(apis, stateAPIManifestHeadTag)
+ opts = append(opts, apiSaveOutput(res.Output))
+ if err := r.API.ManifestHeadExists(r.Config.schemeReg, repo, tag, dig, td, opts...); err != nil {
+ r.TestFail(res, err, tdName, apis...)
+ return fmt.Errorf("%.0w%w", errTestAPIFail, err)
+ }
+ r.TestPass(res, tdName, apis...)
+ return nil
+ })
+}
+
+func (r *runner) TestList(parent *results, tdName string, repo string) error {
+ return r.ChildRun("tag-list", parent, func(r *runner, res *results) error {
+ if err := r.APIRequire(stateAPITagList); err != nil {
+ r.TestSkip(res, err, tdName, stateAPITagList)
+ return fmt.Errorf("%.0w%w", errTestAPISkip, err)
+ }
+ tagList, err := r.API.TagList(r.Config.schemeReg, repo, apiSaveOutput(res.Output))
+ if err != nil {
+ r.TestFail(res, err, tdName, stateAPITagList)
+ return fmt.Errorf("%.0w%w", errTestAPIFail, err)
+ }
+ errs := []error{}
+ for tag := range r.State.Data[tdName].tags {
+ if !slices.Contains(tagList.Tags, tag) {
+ errs = append(errs, fmt.Errorf("missing tag %q from listing%.0w", tag, errTestAPIFail))
+ }
+ }
+ if len(errs) > 0 {
+ r.TestFail(res, errors.Join(errs...), tdName, stateAPITagList)
+ return errors.Join(errs...)
+ }
+ r.TestPass(res, tdName, stateAPITagList)
+ return nil
+ })
+}
+
+func (r *runner) TestPull(parent *results, tdName string, repo string) error {
+ return r.ChildRun("pull", parent, func(r *runner, res *results) error {
+ errs := []error{}
+ for tag, dig := range r.State.Data[tdName].tags {
+ err := r.TestPullManifestTag(res, tdName, repo, tag, dig)
+ if err != nil {
+ errs = append(errs, fmt.Errorf("failed to pull manifest by tag %s%.0w", tag, err))
+ }
+ }
+ for i, dig := range r.State.Data[tdName].manOrder {
+ err := r.TestPullManifestDigest(res, tdName, repo, dig)
+ if err != nil {
+ errs = append(errs, fmt.Errorf("failed to pull manifest %d, digest %s%.0w", i, dig.String(), err))
+ }
+ }
+ for dig := range r.State.Data[tdName].blobs {
+ err := r.TestPullBlob(res, tdName, repo, dig)
+ if err != nil {
+ errs = append(errs, fmt.Errorf("failed to pull blob %s%.0w", dig.String(), err))
+ }
+ }
+ if len(errs) > 0 {
+ return errors.Join(errs...)
+ }
+ return nil
+ })
+}
+
+func (r *runner) TestPullBlob(parent *results, tdName string, repo string, dig digest.Digest) error {
+ return r.ChildRun("blob-get", parent, func(r *runner, res *results) error {
+ if err := r.APIRequire(stateAPIBlobGetFull); err != nil {
+ r.TestSkip(res, err, tdName, stateAPIBlobGetFull)
+ return fmt.Errorf("%.0w%w", errTestAPISkip, err)
+ }
+ if err := r.API.BlobGetExistsFull(r.Config.schemeReg, repo, dig, r.State.Data[tdName], apiSaveOutput(res.Output)); err != nil {
+ r.TestFail(res, err, tdName, stateAPIBlobGetFull)
+ return fmt.Errorf("%.0w%w", errTestAPIFail, err)
+ }
+ r.TestPass(res, tdName, stateAPIBlobGetFull)
+ return nil
+ })
+}
+
+func (r *runner) TestPullManifestDigest(parent *results, tdName string, repo string, dig digest.Digest) error {
+ td := r.State.Data[tdName]
+ opts := []apiDoOpt{}
+ apis := []stateAPIType{}
+ return r.ChildRun("manifest-by-digest", parent, func(r *runner, res *results) error {
+ if err := r.APIRequire(stateAPIManifestGetDigest); err != nil {
+ r.State.DataStatus[tdName] = r.State.DataStatus[tdName].Set(statusSkip)
+ r.TestSkip(res, err, tdName, stateAPIManifestGetDigest)
+ return fmt.Errorf("%.0w%w", errTestAPISkip, err)
+ }
+ apis = append(apis, stateAPIManifestGetDigest)
+ opts = append(opts, apiSaveOutput(res.Output))
+ if err := r.API.ManifestGetExists(r.Config.schemeReg, repo, dig.String(), dig, td, opts...); err != nil {
+ r.TestFail(res, err, tdName, apis...)
+ return fmt.Errorf("%.0w%w", errTestAPIFail, err)
+ }
+ r.TestPass(res, tdName, apis...)
+ return nil
+ })
+}
+
+func (r *runner) TestPullManifestTag(parent *results, tdName string, repo string, tag string, dig digest.Digest) error {
+ td := r.State.Data[tdName]
+ opts := []apiDoOpt{}
+ apis := []stateAPIType{}
+ return r.ChildRun("manifest-by-tag", parent, func(r *runner, res *results) error {
+ if err := r.APIRequire(stateAPIManifestGetTag); err != nil {
+ r.State.DataStatus[tdName] = r.State.DataStatus[tdName].Set(statusSkip)
+ r.TestSkip(res, err, tdName, stateAPIManifestGetTag)
+ return fmt.Errorf("%.0w%w", errTestAPISkip, err)
+ }
+ apis = append(apis, stateAPIManifestGetTag)
+ opts = append(opts, apiSaveOutput(res.Output))
+ if err := r.API.ManifestGetExists(r.Config.schemeReg, repo, tag, dig, td, opts...); err != nil {
+ r.TestFail(res, err, tdName, apis...)
+ return fmt.Errorf("%.0w%w", errTestAPIFail, err)
+ }
+ r.TestPass(res, tdName, apis...)
+ return nil
+ })
+}
+
+func (r *runner) TestPush(parent *results, tdName string, repo string) error {
+ return r.ChildRun("push", parent, func(r *runner, res *results) error {
+ errs := []error{}
+ for dig := range r.State.Data[tdName].blobs {
+ err := r.TestPushBlobAny(res, tdName, repo, dig)
+ if err != nil {
+ errs = append(errs, fmt.Errorf("failed to push blob %s%.0w", dig.String(), err))
+ }
+ }
+ for i, dig := range r.State.Data[tdName].manOrder {
+ err := r.TestPushManifestDigest(res, tdName, repo, dig)
+ if err != nil {
+ errs = append(errs, fmt.Errorf("failed to push manifest %d, digest %s%.0w", i, dig.String(), err))
+ }
+ }
+ for tag, dig := range r.State.Data[tdName].tags {
+ err := r.TestPushManifestTag(res, tdName, repo, tag, dig)
+ if err != nil {
+ errs = append(errs, fmt.Errorf("failed to push manifest tag %s%.0w", tag, err))
+ }
+ }
+ if len(errs) > 0 {
+ return errors.Join(errs...)
+ }
+ return nil
+ })
+}
+
+var (
+ blobAPIs = []stateAPIType{stateAPIBlobPostPut, stateAPIBlobPostOnly, stateAPIBlobPatchStream, stateAPIBlobPatchChunked}
+ blobAPIsTested = [stateAPIMax]bool{}
+ blobAPIsTestedByAlgo = map[digest.Algorithm]*[stateAPIMax]bool{}
+)
+
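+// TestPushBlobAny pushes a blob using whichever push API is most useful to exercise: untested
+// APIs first, then APIs untested with this digest algorithm, then APIs known to pass, and
+// finally the remaining APIs in preferred order, returning on the first success.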
+func (r *runner) TestPushBlobAny(parent *results, tdName string, repo string, dig digest.Digest, opts ...apiDoOpt) error {
+ if err := r.APIRequire(stateAPIBlobPush); err != nil {
+ return fmt.Errorf("%.0w%w", errTestAPISkip, err)
+ }
+ apis := []stateAPIType{}
+ if _, ok := blobAPIsTestedByAlgo[dig.Algorithm()]; !ok {
+ blobAPIsTestedByAlgo[dig.Algorithm()] = &[stateAPIMax]bool{}
+ }
+ // first try untested APIs
+ for _, api := range blobAPIs {
+ if !blobAPIsTested[api] {
+ apis = append(apis, api)
+ }
+ }
+ // then untested with a given algorithm
+ for _, api := range blobAPIs {
+ if !blobAPIsTestedByAlgo[dig.Algorithm()][api] && !slices.Contains(apis, api) {
+ apis = append(apis, api)
+ }
+ }
+ // next use APIs that are known successful
+ for _, api := range blobAPIs {
+ if r.State.APIStatus[api] == statusPass && !slices.Contains(apis, api) {
+ apis = append(apis, api)
+ }
+ }
+ // lastly use APIs in preferred order
+ for _, api := range blobAPIs {
+ if !slices.Contains(apis, api) {
+ apis = append(apis, api)
+ }
+ }
+ // return on the first successful API
+ errs := []error{}
+ for _, api := range apis {
+ err := errors.New("not implemented")
+ switch api {
+ case stateAPIBlobPostPut:
+ err = r.TestPushBlobPostPut(parent, tdName, repo, dig, opts...)
+ case stateAPIBlobPostOnly:
+ err = r.TestPushBlobPostOnly(parent, tdName, repo, dig, opts...)
+ case stateAPIBlobPatchStream:
+ err = r.TestPushBlobPatchStream(parent, tdName, repo, dig, opts...)
+ case stateAPIBlobPatchChunked:
+ err = r.TestPushBlobPatchChunked(parent, tdName, repo, dig, opts...)
+ }
+ blobAPIsTested[api] = true
+ blobAPIsTestedByAlgo[dig.Algorithm()][api] = true
+ if err == nil {
+ return nil
+ }
+ errs = append(errs, err)
+ }
+ return errors.Join(errs...)
+}
+
+func (r *runner) TestPushBlobMount(parent *results, tdName string, repo, repo2 string, dig digest.Digest) error {
+ return r.ChildRun("blob-mount", parent, func(r *runner, res *results) error {
+ if err := r.APIRequire(stateAPIBlobMountSource); err != nil {
+ r.TestSkip(res, err, tdName, stateAPIBlobMountSource)
+ return fmt.Errorf("%.0w%w", errTestAPISkip, err)
+ }
+ if err := r.API.BlobMount(r.Config.schemeReg, repo, repo2, dig, r.State.Data[tdName], apiSaveOutput(res.Output)); err != nil {
+ r.TestFail(res, err, tdName, stateAPIBlobMountSource)
+ return fmt.Errorf("%.0w%w", errTestAPIFail, err)
+ }
+ r.TestPass(res, tdName, stateAPIBlobMountSource)
+ return nil
+ })
+}
+
+func (r *runner) TestPushBlobMountAnonymous(parent *results, tdName string, repo string, dig digest.Digest) error {
+ return r.ChildRun("blob-mount-anonymous", parent, func(r *runner, res *results) error {
+ if err := r.APIRequire(stateAPIBlobMountAnonymous); err != nil {
+ r.TestSkip(res, err, tdName, stateAPIBlobMountAnonymous)
+ return fmt.Errorf("%.0w%w", errTestAPISkip, err)
+ }
+ if err := r.API.BlobMount(r.Config.schemeReg, repo, "", dig, r.State.Data[tdName], apiSaveOutput(res.Output)); err != nil {
+ r.TestFail(res, err, tdName, stateAPIBlobMountAnonymous)
+ return fmt.Errorf("%.0w%w", errTestAPIFail, err)
+ }
+ r.TestPass(res, tdName, stateAPIBlobMountAnonymous)
+ return nil
+ })
+}
+
+func (r *runner) TestPushBlobMountMissing(parent *results, tdName string, repo, repo2 string, dig digest.Digest) error {
+ return r.ChildRun("blob-mount", parent, func(r *runner, res *results) error {
+ if err := r.APIRequire(stateAPIBlobMountSource); err != nil {
+ r.TestSkip(res, err, tdName, stateAPIBlobMountSource)
+ return fmt.Errorf("%.0w%w", errTestAPISkip, err)
+ }
+ if err := r.API.BlobMount(r.Config.schemeReg, repo, repo2, dig, r.State.Data[tdName], apiSaveOutput(res.Output)); !errors.Is(err, ErrRegUnsupported) {
+ if err == nil {
+ err = fmt.Errorf("blob mount of missing blob incorrectly succeeded")
+ }
+ r.TestFail(res, err, tdName, stateAPIBlobMountSource)
+ return fmt.Errorf("%.0w%w", errTestAPIFail, err)
+ }
+ r.TestPass(res, tdName, stateAPIBlobMountSource)
+ return nil
+ })
+}
+
+func (r *runner) TestPushBlobPostPut(parent *results, tdName string, repo string, dig digest.Digest, opts ...apiDoOpt) error {
+ return r.ChildRun("blob-post-put", parent, func(r *runner, res *results) error {
+ if err := r.APIRequire(stateAPIBlobPostPut); err != nil {
+ r.TestSkip(res, err, tdName, stateAPIBlobPostPut)
+ return fmt.Errorf("%.0w%w", errTestAPISkip, err)
+ }
+ opts = append(opts, apiSaveOutput(res.Output))
+ if err := r.API.BlobPostPut(r.Config.schemeReg, repo, dig, r.State.Data[tdName], opts...); err != nil {
+ r.TestFail(res, err, tdName, stateAPIBlobPostPut)
+ return fmt.Errorf("%.0w%w", errTestAPIFail, err)
+ }
+ r.TestPass(res, tdName, stateAPIBlobPostPut, stateAPIBlobPush)
+ return nil
+ })
+}
+
+func (r *runner) TestPushBlobPostOnly(parent *results, tdName string, repo string, dig digest.Digest, opts ...apiDoOpt) error {
+ return r.ChildRun("blob-post-only", parent, func(r *runner, res *results) error {
+ if err := r.APIRequire(stateAPIBlobPostOnly); err != nil {
+ r.TestSkip(res, err, tdName, stateAPIBlobPostOnly)
+ return fmt.Errorf("%.0w%w", errTestAPISkip, err)
+ }
+ opts = append(opts, apiSaveOutput(res.Output))
+ if err := r.API.BlobPostOnly(r.Config.schemeReg, repo, dig, r.State.Data[tdName], opts...); err != nil {
+ r.TestFail(res, err, tdName, stateAPIBlobPostOnly)
+ return fmt.Errorf("%.0w%w", errTestAPIFail, err)
+ }
+ r.TestPass(res, tdName, stateAPIBlobPostOnly, stateAPIBlobPush)
+ return nil
+ })
+}
+
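+// TestPushBlobPatchChunked pushes a blob with a chunked upload using PATCH requests.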
+func (r *runner) TestPushBlobPatchChunked(parent *results, tdName string, repo string, dig digest.Digest, opts ...apiDoOpt) error {
+ return r.ChildRun("blob-patch-chunked", parent, func(r *runner, res *results) error {
+ if err := r.APIRequire(stateAPIBlobPatchChunked); err != nil {
+ r.TestSkip(res, err, tdName, stateAPIBlobPatchChunked)
+ return fmt.Errorf("%.0w%w", errTestAPISkip, err)
+ }
+ opts = append(opts, apiSaveOutput(res.Output))
+ if err := r.API.BlobPatchChunked(r.Config.schemeReg, repo, dig, r.State.Data[tdName], opts...); err != nil {
+ r.TestFail(res, err, tdName, stateAPIBlobPatchChunked)
+ return fmt.Errorf("%.0w%w", errTestAPIFail, err)
+ }
+ r.TestPass(res, tdName, stateAPIBlobPatchChunked, stateAPIBlobPush)
+ return nil
+ })
+}
+
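+// TestPushBlobPatchStream pushes a blob with a streamed upload using a PATCH request.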
+func (r *runner) TestPushBlobPatchStream(parent *results, tdName string, repo string, dig digest.Digest, opts ...apiDoOpt) error {
+ return r.ChildRun("blob-patch-stream", parent, func(r *runner, res *results) error {
+ if err := r.APIRequire(stateAPIBlobPatchStream); err != nil {
+ r.TestSkip(res, err, tdName, stateAPIBlobPatchStream)
+ return fmt.Errorf("%.0w%w", errTestAPISkip, err)
+ }
+ opts = append(opts, apiSaveOutput(res.Output))
+ if err := r.API.BlobPatchStream(r.Config.schemeReg, repo, dig, r.State.Data[tdName], opts...); err != nil {
+ r.TestFail(res, err, tdName, stateAPIBlobPatchStream)
+ return fmt.Errorf("%.0w%w", errTestAPIFail, err)
+ }
+ r.TestPass(res, tdName, stateAPIBlobPatchStream, stateAPIBlobPush)
+ return nil
+ })
+}
+
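+// TestPushManifestDigest pushes a manifest by digest, also verifying the OCI-Subject header when the manifest has a subject and the referrers API is enabled.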
+func (r *runner) TestPushManifestDigest(parent *results, tdName string, repo string, dig digest.Digest, opts ...apiDoOpt) error {
+ td := r.State.Data[tdName]
+ apis := []stateAPIType{}
+ // if the referrers API is being tested, verify OCI-Subject header is returned when appropriate
+ subj := detectSubject(td.manifests[dig])
+ if subj != nil {
+ apis = append(apis, stateAPIManifestPutSubject)
+ if r.Config.APIs.Referrer {
+ opts = append(opts, apiExpectHeader("OCI-Subject", subj.Digest.String()))
+ }
+ }
+ return r.ChildRun("manifest-by-digest", parent, func(r *runner, res *results) error {
+ if err := r.APIRequire(stateAPIManifestPutDigest); err != nil {
+ r.State.DataStatus[tdName] = r.State.DataStatus[tdName].Set(statusSkip)
+ r.TestSkip(res, err, tdName, stateAPIManifestPutDigest)
+ return fmt.Errorf("%.0w%w", errTestAPISkip, err)
+ }
+ apis = append(apis, stateAPIManifestPutDigest)
+ opts = append(opts, apiSaveOutput(res.Output))
+ if err := r.API.ManifestPut(r.Config.schemeReg, repo, dig.String(), dig, td, opts...); err != nil {
+ r.TestFail(res, err, tdName, apis...)
+ return fmt.Errorf("%.0w%w", errTestAPIFail, err)
+ }
+ r.TestPass(res, tdName, apis...)
+ return nil
+ })
+}
+
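+// TestPushManifestTag pushes a manifest by tag, also verifying the OCI-Subject header when the manifest has a subject and the referrers API is enabled.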
+func (r *runner) TestPushManifestTag(parent *results, tdName string, repo string, tag string, dig digest.Digest, opts ...apiDoOpt) error {
+ td := r.State.Data[tdName]
+ apis := []stateAPIType{}
+ // if the referrers API is being tested, verify OCI-Subject header is returned when appropriate
+ subj := detectSubject(td.manifests[dig])
+ if subj != nil {
+ apis = append(apis, stateAPIManifestPutSubject)
+ if r.Config.APIs.Referrer {
+ opts = append(opts, apiExpectHeader("OCI-Subject", subj.Digest.String()))
+ }
+ }
+ return r.ChildRun("manifest-by-tag", parent, func(r *runner, res *results) error {
+ if err := r.APIRequire(stateAPIManifestPutTag); err != nil {
+ r.State.DataStatus[tdName] = r.State.DataStatus[tdName].Set(statusSkip)
+ r.TestSkip(res, err, tdName, stateAPIManifestPutTag)
+ return fmt.Errorf("%.0w%w", errTestAPISkip, err)
+ }
+ apis = append(apis, stateAPIManifestPutTag)
+ opts = append(opts, apiSaveOutput(res.Output))
+ if err := r.API.ManifestPut(r.Config.schemeReg, repo, tag, dig, td, opts...); err != nil {
+ r.TestFail(res, err, tdName, apis...)
+ return fmt.Errorf("%.0w%w", errTestAPIFail, err)
+ }
+ r.TestPass(res, tdName, apis...)
+ return nil
+ })
+}
+
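+// TestReferrers lists the referrers for each subject in the test data and verifies the expected descriptors are included in the response.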
+func (r *runner) TestReferrers(parent *results, tdName string, repo string) error {
+ if len(r.State.Data[tdName].referrers) == 0 {
+ return nil
+ }
+ return r.ChildRun("referrers", parent, func(r *runner, res *results) error {
+ errs := []error{}
+ for subj, referrerGoal := range r.State.Data[tdName].referrers {
+ if err := r.APIRequire(stateAPIReferrers); err != nil {
+ r.State.DataStatus[tdName] = r.State.DataStatus[tdName].Set(statusSkip)
+ r.TestSkip(res, err, tdName, stateAPIReferrers)
+ return fmt.Errorf("%.0w%w", errTestAPISkip, err)
+ }
+ referrerResp, err := r.API.ReferrersList(r.Config.schemeReg, repo, subj, apiSaveOutput(res.Output))
+ if err != nil {
+ errs = append(errs, err)
+ }
+ if err == nil {
+ for _, goal := range referrerGoal {
+ if !slices.ContainsFunc(referrerResp.Manifests, func(resp image.Descriptor) bool {
+ return resp.Digest == goal.Digest &&
+ resp.MediaType == goal.MediaType &&
+ resp.Size == goal.Size &&
+ resp.ArtifactType == goal.ArtifactType &&
+ mapContainsAll(resp.Annotations, goal.Annotations)
+ }) {
+ errs = append(errs, fmt.Errorf("entry missing from referrers list, subject %s, referrer %+v%.0w", subj, goal, errTestAPIFail))
+ }
+ }
+ }
+ }
+ if len(errs) > 0 {
+ r.TestFail(res, errors.Join(errs...), tdName, stateAPIReferrers)
+ return fmt.Errorf("%.0w%w", errTestAPIFail, errors.Join(errs...))
+ }
+ r.TestPass(res, tdName, stateAPIReferrers)
+ return nil
+ })
+}
+
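+// mapContainsAll reports whether every key/value pair in goal is present in check.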
+func mapContainsAll[K comparable, V comparable](check, goal map[K]V) bool {
+ if len(goal) == 0 {
+ return true
+ }
+ for k, v := range goal {
+ if found, ok := check[k]; !ok || found != v {
+ return false
+ }
+ }
+ return true
+}
+
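+// ChildRun creates a child results entry under parent, runs fn, records any unexpected error, and rolls the counts and status up into the parent.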
+func (r *runner) ChildRun(name string, parent *results, fn func(*runner, *results) error) error {
+ res := resultsNew(name, parent)
+ if parent != nil {
+ parent.Children = append(parent.Children, res)
+ }
+ err := fn(r, res)
+ res.Stop = time.Now()
+ if err != nil && !errors.Is(err, errTestAPIFail) && !errors.Is(err, errTestAPISkip) {
+ res.Errs = append(res.Errs, err)
+ res.Status = res.Status.Set(statusError)
+ res.Counts[statusError]++
+ }
+ if parent != nil {
+ for i := range statusMax {
+ parent.Counts[i] += res.Counts[i]
+ }
+ parent.Status = parent.Status.Set(res.Status)
+ }
+ return err
+}
+
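+// TestSkip records a skipped test, or a disabled test when the API is disabled, updating the test data and API status.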
+func (r *runner) TestSkip(res *results, err error, tdName string, apis ...stateAPIType) {
+ s := statusSkip
+ if errors.Is(err, ErrDisabled) {
+ s = statusDisabled
+ }
+ res.Status = res.Status.Set(s)
+ res.Counts[s]++
+ if tdName != "" {
+ r.State.DataStatus[tdName] = r.State.DataStatus[tdName].Set(s)
+ }
+ for _, a := range apis {
+ r.State.APIStatus[a] = r.State.APIStatus[a].Set(s)
+ }
+ fmt.Fprintf(res.Output, "%s: skipping test:\n %s\n", res.Name,
+ strings.ReplaceAll(err.Error(), "\n", "\n "))
+ r.Log.Info("skipping test", "name", res.Name, "error", err.Error())
+}
+
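+// TestFail records a failed test, downgrading to the error or disabled status based on the wrapped error.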
+func (r *runner) TestFail(res *results, err error, tdName string, apis ...stateAPIType) {
+ s := statusFail
+ if errors.Is(err, errTestAPIError) {
+ s = statusError
+ } else if errors.Is(err, ErrRegUnsupported) {
+ s = statusDisabled
+ }
+ res.Status = res.Status.Set(s)
+ res.Counts[s]++
+ res.Errs = append(res.Errs, err)
+ if tdName != "" {
+ r.State.DataStatus[tdName] = r.State.DataStatus[tdName].Set(s)
+ }
+ for _, a := range apis {
+ r.State.APIStatus[a] = r.State.APIStatus[a].Set(s)
+ }
+ if s == statusFail {
+ r.Log.Warn("failed test", "name", res.Name, "error", err.Error())
+ r.Log.Debug("failed test output", "name", res.Name, "output", res.Output.String())
+ }
+}
+
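+// TestPass records a passing test, updating the test data and API status.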
+func (r *runner) TestPass(res *results, tdName string, apis ...stateAPIType) {
+ res.Status = res.Status.Set(statusPass)
+ res.Counts[statusPass]++
+ if tdName != "" {
+ r.State.DataStatus[tdName] = r.State.DataStatus[tdName].Set(statusPass)
+ }
+ for _, a := range apis {
+ r.State.APIStatus[a] = r.State.APIStatus[a].Set(statusPass)
+ }
+ r.Log.Info("passing test", "name", res.Name)
+ r.Log.Debug("passing test output", "name", res.Name, "output", res.Output.String())
+}
+
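+// APIRequire returns an error when any of the listed APIs are disabled in the configuration.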
+func (r *runner) APIRequire(apis ...stateAPIType) error {
+ errs := []error{}
+ for _, a := range apis {
+ aText, err := a.MarshalText()
+ if err != nil {
+ errs = append(errs, fmt.Errorf("unknown api %d", a))
+ continue
+ }
+ // check the configuration disables the api
+ switch a {
+ case stateAPITagList:
+ if !r.Config.APIs.Tags.List {
+ errs = append(errs, fmt.Errorf("api %s is disabled in the configuration%.0w", aText, ErrDisabled))
+ }
+ case stateAPIManifestGetTag, stateAPIManifestGetDigest, stateAPIBlobGetFull, stateAPIBlobGetRange:
+ if !r.Config.APIs.Pull {
+ errs = append(errs, fmt.Errorf("api %s is disabled in the configuration%.0w", aText, ErrDisabled))
+ }
+ case stateAPIManifestPutTag, stateAPIManifestPutDigest, stateAPIManifestPutSubject,
+ stateAPIBlobPush, stateAPIBlobPostOnly, stateAPIBlobPostPut,
+ stateAPIBlobPatchChunked, stateAPIBlobPatchStream, stateAPIBlobMountSource:
+ if !r.Config.APIs.Push {
+ errs = append(errs, fmt.Errorf("api %s is disabled in the configuration%.0w", aText, ErrDisabled))
+ }
+ case stateAPIBlobMountAnonymous:
+ if !r.Config.APIs.Push || !r.Config.APIs.Blobs.MountAnonymous {
+ errs = append(errs, fmt.Errorf("api %s is disabled in the configuration%.0w", aText, ErrDisabled))
+ }
+ case stateAPITagDelete:
+ if !r.Config.APIs.Tags.Delete {
+ errs = append(errs, fmt.Errorf("api %s is disabled in the configuration%.0w", aText, ErrDisabled))
+ }
+ case stateAPITagDeleteAtomic:
+ if !r.Config.APIs.Tags.Delete || !r.Config.APIs.Tags.Atomic {
+ errs = append(errs, fmt.Errorf("api %s is disabled in the configuration%.0w", aText, ErrDisabled))
+ }
+ case stateAPIManifestDelete:
+ if !r.Config.APIs.Manifests.Delete {
+ errs = append(errs, fmt.Errorf("api %s is disabled in the configuration%.0w", aText, ErrDisabled))
+ }
+ case stateAPIManifestDeleteAtomic:
+ if !r.Config.APIs.Manifests.Atomic {
+ errs = append(errs, fmt.Errorf("api %s is disabled in the configuration%.0w", aText, ErrDisabled))
+ }
+ case stateAPIBlobDelete:
+ if !r.Config.APIs.Blobs.Delete {
+ errs = append(errs, fmt.Errorf("api %s is disabled in the configuration%.0w", aText, ErrDisabled))
+ }
+ case stateAPIBlobDeleteAtomic:
+ if !r.Config.APIs.Blobs.Atomic {
+ errs = append(errs, fmt.Errorf("api %s is disabled in the configuration%.0w", aText, ErrDisabled))
+ }
+ case stateAPIReferrers:
+ if !r.Config.APIs.Referrer {
+ errs = append(errs, fmt.Errorf("api %s is disabled in the configuration%.0w", aText, ErrDisabled))
+ }
+ }
+		// do not check the [r.State.APIStatus] since tests may pass or fail based on different input data
+ }
+ if len(errs) > 0 {
+ return errors.Join(errs...)
+ }
+ return nil
+}
diff --git a/conformance2/state.go b/conformance2/state.go
new file mode 100644
index 00000000..a1266ce4
--- /dev/null
+++ b/conformance2/state.go
@@ -0,0 +1,174 @@
+package main
+
+import (
+ "fmt"
+ "strings"
+)
+
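+// state tracks the resulting status of each API and each generated test data set.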
+type state struct {
+ APIStatus map[stateAPIType]status
+ Data map[string]*testData
+ DataStatus map[string]status
+}
+
+func stateNew() *state {
+ return &state{
+ APIStatus: map[stateAPIType]status{},
+ Data: map[string]*testData{},
+ DataStatus: map[string]status{},
+ }
+}
+
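+// stateAPIType enumerates the registry APIs tracked by the conformance test.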
+type stateAPIType int
+
+const (
+ stateAPITagList stateAPIType = iota
+ stateAPITagDelete
+ stateAPITagDeleteAtomic
+ stateAPIBlobPush // any blob push API
+ stateAPIBlobPostOnly
+ stateAPIBlobPostPut
+ stateAPIBlobPatchChunked
+ stateAPIBlobPatchStream
+ stateAPIBlobMountSource
+ stateAPIBlobMountAnonymous
+ stateAPIBlobGetFull
+ stateAPIBlobGetRange
+ stateAPIBlobHead
+ stateAPIBlobDelete
+ stateAPIBlobDeleteAtomic
+ stateAPIManifestPutDigest
+ stateAPIManifestPutTag
+ stateAPIManifestPutSubject
+ stateAPIManifestGetDigest
+ stateAPIManifestGetTag
+ stateAPIManifestHeadDigest
+ stateAPIManifestHeadTag
+ stateAPIManifestDelete
+ stateAPIManifestDeleteAtomic
+ stateAPIReferrers
+ stateAPIMax // number of APIs for iterating
+)
+
+func (a stateAPIType) String() string {
+ switch a {
+ default:
+ return "Unknown"
+ case stateAPITagList:
+ return "Tag listing"
+ case stateAPITagDelete:
+ return "Tag delete"
+ case stateAPITagDeleteAtomic:
+ return "Tag delete atomic"
+ case stateAPIBlobPush:
+ return "Blob push"
+ case stateAPIBlobPostOnly:
+ return "Blob post only"
+ case stateAPIBlobPostPut:
+ return "Blob post put"
+ case stateAPIBlobPatchChunked:
+ return "Blob chunked"
+ case stateAPIBlobPatchStream:
+ return "Blob streaming"
+ case stateAPIBlobMountSource:
+ return "Blob mount"
+ case stateAPIBlobMountAnonymous:
+ return "Blob anonymous mount"
+ case stateAPIBlobGetFull:
+ return "Blob get"
+ case stateAPIBlobGetRange:
+ return "Blob get range"
+ case stateAPIBlobHead:
+ return "Blob head"
+ case stateAPIBlobDelete:
+ return "Blob delete"
+ case stateAPIBlobDeleteAtomic:
+ return "Blob delete atomic"
+ case stateAPIManifestPutDigest:
+ return "Manifest put by digest"
+ case stateAPIManifestPutTag:
+ return "Manifest put by tag"
+ case stateAPIManifestPutSubject:
+ return "Manifest put with subject"
+ case stateAPIManifestGetDigest:
+ return "Manifest get by digest"
+ case stateAPIManifestGetTag:
+ return "Manifest get by tag"
+ case stateAPIManifestHeadDigest:
+ return "Manifest head by digest"
+ case stateAPIManifestHeadTag:
+ return "Manifest head by tag"
+ case stateAPIManifestDelete:
+ return "Manifest delete"
+ case stateAPIManifestDeleteAtomic:
+ return "Manifest delete atomic"
+ case stateAPIReferrers:
+ return "Referrers"
+ }
+}
+
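+// MarshalText returns the human readable name of the API.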
+func (a stateAPIType) MarshalText() ([]byte, error) {
+ ret := a.String()
+	if ret == "Unknown" {
+ return []byte(ret), fmt.Errorf("unknown API %d", a)
+ }
+ return []byte(ret), nil
+}
+
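+// UnmarshalText parses the name of an API, ignoring case.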
+func (a *stateAPIType) UnmarshalText(b []byte) error {
+ switch strings.ToLower(string(b)) {
+ default:
+ return fmt.Errorf("unknown API %s", b)
+	case "tag listing":
+		*a = stateAPITagList
+	case "tag delete":
+		*a = stateAPITagDelete
+	case "tag delete atomic":
+		*a = stateAPITagDeleteAtomic
+	case "blob push":
+		*a = stateAPIBlobPush
+	case "blob post only":
+		*a = stateAPIBlobPostOnly
+	case "blob post put":
+		*a = stateAPIBlobPostPut
+	case "blob chunked":
+		*a = stateAPIBlobPatchChunked
+	case "blob streaming":
+		*a = stateAPIBlobPatchStream
+	case "blob mount":
+		*a = stateAPIBlobMountSource
+	case "blob anonymous mount":
+		*a = stateAPIBlobMountAnonymous
+	case "blob get":
+		*a = stateAPIBlobGetFull
+	case "blob get range":
+		*a = stateAPIBlobGetRange
+	case "blob head":
+		*a = stateAPIBlobHead
+	case "blob delete":
+		*a = stateAPIBlobDelete
+	case "blob delete atomic":
+		*a = stateAPIBlobDeleteAtomic
+	case "manifest put by digest":
+		*a = stateAPIManifestPutDigest
+	case "manifest put by tag":
+		*a = stateAPIManifestPutTag
+	case "manifest put with subject":
+		*a = stateAPIManifestPutSubject
+	case "manifest get by digest":
+		*a = stateAPIManifestGetDigest
+	case "manifest get by tag":
+		*a = stateAPIManifestGetTag
+	case "manifest head by digest":
+		*a = stateAPIManifestHeadDigest
+	case "manifest head by tag":
+		*a = stateAPIManifestHeadTag
+	case "manifest delete":
+		*a = stateAPIManifestDelete
+	case "manifest delete atomic":
+		*a = stateAPIManifestDeleteAtomic
+	case "referrers":
+		*a = stateAPIReferrers
+ }
+ return nil
+}
diff --git a/conformance2/testdata.go b/conformance2/testdata.go
new file mode 100644
index 00000000..b28f5a87
--- /dev/null
+++ b/conformance2/testdata.go
@@ -0,0 +1,581 @@
+package main
+
+import (
+ "archive/tar"
+ "bytes"
+ "compress/gzip"
+ "crypto/rand"
+ "encoding/json"
+ "fmt"
+ "io"
+ "maps"
+ "math"
+ "math/big"
+ "reflect"
+ "strings"
+
+ digest "github.com/opencontainers/go-digest"
+ "github.com/opencontainers/image-spec/specs-go"
+ image "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
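+// testData holds a generated set of blobs, manifests, tags, and expected referrers used to test a registry.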
+type testData struct {
+ name string // name of data set for logs
+ tags map[string]digest.Digest
+ desc map[digest.Digest]*image.Descriptor
+ blobs map[digest.Digest][]byte
+ manifests map[digest.Digest][]byte
+ manOrder []digest.Digest // ordered list to push manifests, the last is optionally tagged
+ referrers map[digest.Digest][]*image.Descriptor
+}
+
+func newTestData(name string) *testData {
+ return &testData{
+ name: name,
+ tags: map[string]digest.Digest{},
+ desc: map[digest.Digest]*image.Descriptor{},
+ blobs: map[digest.Digest][]byte{},
+ manifests: map[digest.Digest][]byte{},
+ manOrder: []digest.Digest{},
+ referrers: map[digest.Digest][]*image.Descriptor{},
+ }
+}
+
+type genComp int
+
+const (
+ genCompUncomp genComp = iota
+ genCompGzip
+)
+
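+// genOptS holds the options for generating test data, set with the genWith* option functions,
+// e.g. td.genManifestFull(genWithTag("latest"), genWithLayerCount(1)).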
+type genOptS struct {
+ algo digest.Algorithm
+ annotations map[string]string
+ annotationUniq bool
+ artifactType string
+ blobSize int64
+ comp genComp
+ configBytes []byte
+ configMediaType string
+ descriptorMediaType string
+ extraField bool
+ layerCount int
+ layerMediaType string
+ platform image.Platform
+ platforms []*image.Platform
+ setData bool
+ subject *image.Descriptor
+ tag string
+}
+
+type genOpt func(*genOptS)
+
+func genWithAlgo(algo digest.Algorithm) genOpt {
+ return func(opt *genOptS) {
+ opt.algo = algo
+ }
+}
+
+func genWithAnnotations(annotations map[string]string) genOpt {
+ return func(opt *genOptS) {
+ if opt.annotations == nil {
+ opt.annotations = annotations
+ } else {
+ for k, v := range annotations {
+ opt.annotations[k] = v
+ }
+ }
+ }
+}
+
+func genWithAnnotationUniq() genOpt {
+ return func(opt *genOptS) {
+ opt.annotationUniq = true
+ }
+}
+
+func genWithArtifactType(artifactType string) genOpt {
+ return func(opt *genOptS) {
+ opt.artifactType = artifactType
+ }
+}
+
+func genWithCompress(comp genComp) genOpt {
+ return func(opt *genOptS) {
+ opt.comp = comp
+ }
+}
+
+func genWithConfigBytes(b []byte) genOpt {
+ return func(opt *genOptS) {
+ opt.configBytes = b
+ }
+}
+
+func genWithConfigMediaType(mediaType string) genOpt {
+ return func(opt *genOptS) {
+ opt.configMediaType = mediaType
+ }
+}
+
+func genWithDescriptorData() genOpt {
+ return func(opt *genOptS) {
+ opt.setData = true
+ }
+}
+
+func genWithDescriptorMediaType(mediaType string) genOpt {
+ return func(opt *genOptS) {
+ opt.descriptorMediaType = mediaType
+ }
+}
+
+func genWithExtraField() genOpt {
+ return func(opt *genOptS) {
+ opt.extraField = true
+ }
+}
+
+func genWithLayerCount(count int) genOpt {
+ return func(opt *genOptS) {
+ opt.layerCount = count
+ }
+}
+
+func genWithLayerMediaType(mediaType string) genOpt {
+ return func(opt *genOptS) {
+ opt.layerMediaType = mediaType
+ }
+}
+
+func genWithPlatform(p image.Platform) genOpt {
+ return func(opt *genOptS) {
+ opt.platform = p
+ }
+}
+
+func genWithPlatforms(platforms []*image.Platform) genOpt {
+ return func(opt *genOptS) {
+ opt.platforms = platforms
+ }
+}
+
+func genWithBlobSize(size int64) genOpt {
+ return func(opt *genOptS) {
+ opt.blobSize = size
+ }
+}
+
+func genWithSubject(subject image.Descriptor) genOpt {
+ return func(opt *genOptS) {
+ opt.subject = &subject
+ }
+}
+
+func genWithTag(tag string) genOpt {
+ return func(opt *genOptS) {
+ opt.tag = tag
+ }
+}
+
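+// addBlob stores b as a blob in the test data and records its descriptor.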
+func (td *testData) addBlob(b []byte, opts ...genOpt) (digest.Digest, error) {
+ gOpt := genOptS{
+ algo: digest.Canonical,
+ descriptorMediaType: "application/octet-stream",
+ }
+ for _, opt := range opts {
+ opt(&gOpt)
+ }
+ dig := gOpt.algo.FromBytes(b)
+ td.blobs[dig] = b
+ td.desc[dig] = &image.Descriptor{
+ MediaType: gOpt.descriptorMediaType,
+ Digest: dig,
+ Size: int64(len(b)),
+ }
+ if gOpt.setData {
+ td.desc[dig].Data = b
+ }
+ return dig, nil
+}
+
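+// genBlob generates a blob with random content, 2048 bytes by default.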
+func (td *testData) genBlob(opts ...genOpt) (digest.Digest, []byte, error) {
+ gOpt := genOptS{
+ blobSize: 2048,
+ }
+ for _, opt := range opts {
+ opt(&gOpt)
+ }
+ b := make([]byte, gOpt.blobSize)
+ _, err := rand.Read(b)
+ if err != nil {
+ return digest.Digest(""), nil, err
+ }
+ dig, err := td.addBlob(b, opts...)
+ return dig, b, err
+}
+
+// genLayer generates a new layer containing a tar file, returning:
+// - compressed digest
+// - uncompressed digest
+// - layer body (tar+compression)
+func (td *testData) genLayer(fileNum int, opts ...genOpt) (digest.Digest, digest.Digest, []byte, error) {
+ gOpt := genOptS{
+ comp: genCompGzip,
+ algo: digest.Canonical,
+ }
+ for _, opt := range opts {
+ opt(&gOpt)
+ }
+ bufUncomp := &bytes.Buffer{}
+ bufComp := &bytes.Buffer{}
+ var wUncomp io.Writer
+ var mt string
+ switch gOpt.comp {
+ case genCompGzip:
+ wUncomp = gzip.NewWriter(bufComp)
+ mt = "application/vnd.oci.image.layer.v1.tar+gzip"
+ case genCompUncomp:
+ wUncomp = bufComp
+ mt = "application/vnd.oci.image.layer.v1.tar"
+ }
+	// tee the tar stream into bufUncomp so the uncompressed digest can be computed below
+	wTar := tar.NewWriter(io.MultiWriter(bufUncomp, wUncomp))
+ bigRandNum, err := rand.Int(rand.Reader, big.NewInt(math.MaxInt64))
+ if err != nil {
+ return digest.Digest(""), digest.Digest(""), nil, err
+ }
+ randNum := bigRandNum.Int64()
+	file := fmt.Sprintf("Conformance test file contents for file number %d.\nToday's lucky number is %d\n", fileNum, randNum)
+ err = wTar.WriteHeader(&tar.Header{
+ Name: fmt.Sprintf("./conformance-%d.txt", fileNum),
+ Size: int64(len(file)),
+ Mode: 0644,
+ })
+ if err != nil {
+ return digest.Digest(""), digest.Digest(""), nil, err
+ }
+ _, err = wTar.Write([]byte(file))
+ if err != nil {
+ return digest.Digest(""), digest.Digest(""), nil, err
+ }
+ err = wTar.Close()
+ if err != nil {
+ return digest.Digest(""), digest.Digest(""), nil, err
+ }
+ if closer, ok := wUncomp.(io.Closer); gOpt.comp != genCompUncomp && ok {
+ err = closer.Close()
+ }
+ if err != nil {
+ return digest.Digest(""), digest.Digest(""), nil, err
+ }
+ bodyComp := bufComp.Bytes()
+ bodyUncomp := bufUncomp.Bytes()
+ digComp := gOpt.algo.FromBytes(bodyComp)
+ digUncomp := gOpt.algo.FromBytes(bodyUncomp)
+ td.blobs[digComp] = bodyComp
+ td.desc[digComp] = &image.Descriptor{
+ MediaType: mt,
+ Digest: digComp,
+ Size: int64(len(bodyComp)),
+ }
+ if gOpt.setData {
+ td.desc[digComp].Data = bodyComp
+ }
+ td.desc[digUncomp] = &image.Descriptor{
+ MediaType: "application/vnd.oci.image.layer.v1.tar",
+ Digest: digUncomp,
+ Size: int64(len(bodyUncomp)),
+ }
+ if gOpt.setData {
+ td.desc[digUncomp].Data = bodyUncomp
+ }
+ return digComp, digUncomp, bodyComp, nil
+}
+
+// genConfig returns a config for the given platform and list of uncompressed layer digests.
+func (td *testData) genConfig(p image.Platform, layers []digest.Digest, opts ...genOpt) (digest.Digest, []byte, error) {
+ gOpt := genOptS{
+ algo: digest.Canonical,
+ configMediaType: "application/vnd.oci.image.config.v1+json",
+ }
+ for _, opt := range opts {
+ opt(&gOpt)
+ }
+ config := image.Image{
+ Author: "OCI Conformance Test",
+ Platform: p,
+ RootFS: image.RootFS{
+ Type: "layers",
+ DiffIDs: layers,
+ },
+ }
+ var body []byte
+ var err error
+ if !gOpt.extraField {
+ body, err = json.Marshal(config)
+ } else {
+ body, err = json.Marshal(genAddJSONFields(config))
+ }
+ if err != nil {
+ return digest.Digest(""), nil, err
+ }
+ dig := gOpt.algo.FromBytes(body)
+ td.blobs[dig] = body
+ td.desc[dig] = &image.Descriptor{
+ MediaType: gOpt.configMediaType,
+ Digest: dig,
+ Size: int64(len(body)),
+ }
+ if gOpt.setData {
+ td.desc[dig].Data = body
+ }
+ return dig, body, nil
+}
+
+// genManifest returns an image manifest with the selected config and compressed layer digests.
+func (td *testData) genManifest(conf image.Descriptor, layers []image.Descriptor, opts ...genOpt) (digest.Digest, []byte, error) {
+ gOpt := genOptS{
+ algo: digest.Canonical,
+ }
+ for _, opt := range opts {
+ opt(&gOpt)
+ }
+ mt := "application/vnd.oci.image.manifest.v1+json"
+ m := image.Manifest{
+ Versioned: specs.Versioned{SchemaVersion: 2},
+ MediaType: mt,
+ ArtifactType: gOpt.artifactType,
+ Config: conf,
+ Layers: layers,
+ Subject: gOpt.subject,
+ Annotations: gOpt.annotations,
+ }
+ if gOpt.annotationUniq {
+ if m.Annotations == nil {
+ m.Annotations = map[string]string{}
+ } else {
+ m.Annotations = maps.Clone(m.Annotations)
+ }
+ m.Annotations["org.example."+rand.Text()] = rand.Text()
+ }
+ var body []byte
+ var err error
+ if !gOpt.extraField {
+ body, err = json.Marshal(m)
+ } else {
+ body, err = json.Marshal(genAddJSONFields(m))
+ }
+ if err != nil {
+ return digest.Digest(""), nil, err
+ }
+ dig := gOpt.algo.FromBytes(body)
+ td.manifests[dig] = body
+ td.manOrder = append(td.manOrder, dig)
+ td.desc[dig] = &image.Descriptor{
+ MediaType: m.MediaType,
+ Digest: dig,
+ Size: int64(len(body)),
+ }
+ if gOpt.setData {
+ td.desc[dig].Data = body
+ }
+ at := m.ArtifactType
+ if at == "" {
+ at = m.Config.MediaType
+ }
+ if gOpt.subject != nil {
+ td.referrers[gOpt.subject.Digest] = append(td.referrers[gOpt.subject.Digest], &image.Descriptor{
+ MediaType: m.MediaType,
+ ArtifactType: at,
+ Digest: dig,
+ Size: int64(len(body)),
+ Annotations: m.Annotations,
+ })
+ }
+ if gOpt.tag != "" {
+ td.tags[gOpt.tag] = dig
+ }
+ return dig, body, nil
+}
+
+// genManifestFull creates an image with layers and a config
+func (td *testData) genManifestFull(opts ...genOpt) (digest.Digest, error) {
+ gOpt := genOptS{
+ layerCount: 2,
+ platform: image.Platform{OS: "linux", Architecture: "amd64"},
+ }
+ for _, opt := range opts {
+ opt(&gOpt)
+ }
+ digCList := []digest.Digest{}
+ digUCList := []digest.Digest{}
+ for l := range gOpt.layerCount {
+ if gOpt.layerMediaType == "" || strings.HasPrefix(gOpt.layerMediaType, "application/vnd.oci.image.layer.v1") {
+ // image
+ digC, digUC, _, err := td.genLayer(l, opts...)
+ if err != nil {
+ return "", fmt.Errorf("failed to generate test data layer %d: %w", l, err)
+ }
+ digCList = append(digCList, digC)
+ digUCList = append(digUCList, digUC)
+ } else {
+ // artifact
+ lOpts := []genOpt{
+ genWithDescriptorMediaType(gOpt.layerMediaType),
+ }
+ lOpts = append(lOpts, opts...)
+ dig, _, err := td.genBlob(lOpts...)
+ if err != nil {
+ return "", fmt.Errorf("failed to generate test artifact blob: %w", err)
+ }
+ digCList = append(digCList, dig)
+ digUCList = append(digUCList, dig)
+ }
+ }
+ cDig := digest.Digest("")
+ if gOpt.configMediaType == "" || gOpt.configMediaType == "application/vnd.oci.image.config.v1+json" {
+ // image config
+ dig, _, err := td.genConfig(gOpt.platform, digUCList, opts...)
+ if err != nil {
+ return "", fmt.Errorf("failed to generate test data: %w", err)
+ }
+ cDig = dig
+ } else {
+ // artifact
+ bOpts := []genOpt{
+ genWithDescriptorMediaType(gOpt.configMediaType),
+ }
+ bOpts = append(bOpts, opts...)
+ dig, err := td.addBlob(gOpt.configBytes, bOpts...)
+ if err != nil {
+ return "", fmt.Errorf("failed to generate test artifact config: %w", err)
+ }
+ cDig = dig
+ }
+ layers := make([]image.Descriptor, len(digCList))
+ for i, lDig := range digCList {
+ layers[i] = *td.desc[lDig]
+ }
+ mDig, _, err := td.genManifest(*td.desc[cDig], layers, opts...)
+ if err != nil {
+ return "", fmt.Errorf("failed to generate test data: %w", err)
+ }
+ return mDig, nil
+}
+
+// genIndex returns an index manifest referencing the specified manifests and platforms.
+func (td *testData) genIndex(platforms []*image.Platform, manifests []digest.Digest, opts ...genOpt) (digest.Digest, []byte, error) {
+ mt := "application/vnd.oci.image.index.v1+json"
+ gOpt := genOptS{
+ algo: digest.Canonical,
+ }
+ for _, opt := range opts {
+ opt(&gOpt)
+ }
+ if len(platforms) != len(manifests) {
+		return digest.Digest(""), nil, fmt.Errorf("genIndex requires the same number of platforms and manifests")
+ }
+ ind := image.Index{
+ Versioned: specs.Versioned{SchemaVersion: 2},
+ MediaType: mt,
+ ArtifactType: gOpt.artifactType,
+ Manifests: make([]image.Descriptor, len(manifests)),
+ Subject: gOpt.subject,
+ Annotations: gOpt.annotations,
+ }
+ for i, l := range manifests {
+ d := *td.desc[l]
+ d.Platform = platforms[i]
+ ind.Manifests[i] = d
+ }
+ if gOpt.annotationUniq {
+ if ind.Annotations == nil {
+ ind.Annotations = map[string]string{}
+ } else {
+ ind.Annotations = maps.Clone(ind.Annotations)
+ }
+ ind.Annotations["org.example."+rand.Text()] = rand.Text()
+ }
+ var body []byte
+ var err error
+ if !gOpt.extraField {
+ body, err = json.Marshal(ind)
+ } else {
+ body, err = json.Marshal(genAddJSONFields(ind))
+ }
+ if err != nil {
+ return digest.Digest(""), nil, err
+ }
+ dig := gOpt.algo.FromBytes(body)
+ td.manifests[dig] = body
+ td.manOrder = append(td.manOrder, dig)
+ td.desc[dig] = &image.Descriptor{
+ MediaType: ind.MediaType,
+ Digest: dig,
+ Size: int64(len(body)),
+ }
+ if gOpt.setData {
+ td.desc[dig].Data = body
+ }
+ if gOpt.subject != nil {
+ td.referrers[gOpt.subject.Digest] = append(td.referrers[gOpt.subject.Digest], &image.Descriptor{
+ MediaType: ind.MediaType,
+ ArtifactType: ind.ArtifactType,
+ Digest: dig,
+ Size: int64(len(body)),
+ Annotations: ind.Annotations,
+ })
+ }
+ if gOpt.tag != "" {
+ td.tags[gOpt.tag] = dig
+ }
+ return dig, body, nil
+}
+
+// genIndexFull creates an index with multiple images, including the image layers and configs
+func (td *testData) genIndexFull(opts ...genOpt) (digest.Digest, error) {
+ gOpt := genOptS{
+ platforms: []*image.Platform{
+ {OS: "linux", Architecture: "amd64"},
+ {OS: "linux", Architecture: "arm64"},
+ },
+ }
+ for _, opt := range opts {
+ opt(&gOpt)
+ }
+ digImgList := []digest.Digest{}
+ for _, p := range gOpt.platforms {
+ iOpts := []genOpt{
+ genWithPlatform(*p),
+ }
+ iOpts = append(iOpts, opts...)
+ mDig, err := td.genManifestFull(iOpts...)
+ if err != nil {
+ return "", err
+ }
+ digImgList = append(digImgList, mDig)
+ }
+ iDig, _, err := td.genIndex(gOpt.platforms, digImgList, opts...)
+ if err != nil {
+ return "", fmt.Errorf("failed to generate test data: %w", err)
+ }
+ return iDig, nil
+}
+
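+// genAddJSONFields returns a copy of v wrapped in a struct that embeds it and adds an extra field with a randomly generated JSON tag and value.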
+func genAddJSONFields(v any) any {
+ newT := reflect.StructOf([]reflect.StructField{
+ {
+ Name: "Embed",
+ Anonymous: true,
+ Type: reflect.TypeOf(v),
+ },
+ {
+ Name: "Custom",
+ Type: reflect.TypeOf(""),
+ Tag: reflect.StructTag("json:\"org." + rand.Text() + "\""),
+ },
+ })
+ newV := reflect.New(newT).Elem()
+ newV.Field(0).Set(reflect.ValueOf(v))
+ newV.FieldByName("Custom").SetString(rand.Text())
+ return newV.Interface()
+}