From 9acc6d30eb339d0fa6d3df94db450b693800f4a2 Mon Sep 17 00:00:00 2001 From: Tonis Tiigi Date: Tue, 6 Sep 2022 23:30:58 -0700 Subject: [PATCH] refactor buildinfo into provenance capture Change how provenance information is captured from builds. While previously the frontend passed the buildinfo sources with metadata, now all information is captured through buildkit. A frontend does not need to implement buildinfo and can't set incorrect/incomplete buildinfo for a build result. All LLB operations can now collect as much provenance info as they like that will be used when making the attestation. Previously this was limited to a single Pin value. For example, now we also detect secrets and SSH IDs that the build uses, whether it accesses the network, and whether local sources are used. The new design makes sure this can be easily extended in the future. Provenance capture can now detect builds that do multiple separate subsolves in sequence. For example, a first subsolve gathers the sources for the build and a second one builds from immutable sources without a network connection. If the first solve does not participate in the final build result, it does not end up in the provenance. 
Signed-off-by: Tonis Tiigi --- client/build_test.go | 63 -- client/client_test.go | 90 +- control/control.go | 9 +- examples/dockerfile2llb/main.go | 7 +- frontend/attestations/parse.go | 12 +- frontend/dockerfile/builder/build.go | 96 +- frontend/dockerfile/dockerfile2llb/convert.go | 107 +-- .../dockerfile/dockerfile2llb/convert_test.go | 51 +- .../dockerfile/dockerfile_buildinfo_test.go | 51 +- .../dockerfile/dockerfile_provenance_test.go | 852 ++++++++++++++++++ frontend/dockerfile/dockerfile_test.go | 260 +----- frontend/gateway/gateway.go | 21 - go.mod | 2 +- go.sum | 4 +- solver/jobs.go | 61 +- solver/llbsolver/bridge.go | 105 +-- solver/llbsolver/ops/build.go | 20 +- solver/llbsolver/ops/diff.go | 4 +- solver/llbsolver/ops/exec.go | 39 +- solver/llbsolver/ops/file.go | 35 +- solver/llbsolver/ops/merge.go | 4 +- solver/llbsolver/ops/opsutils/contenthash.go | 71 ++ solver/llbsolver/ops/opsutils/validate.go | 63 ++ solver/llbsolver/ops/source.go | 43 +- solver/llbsolver/proc/provenance.go | 35 +- solver/llbsolver/proc/refs.go | 9 +- solver/llbsolver/proc/sbom.go | 6 +- solver/llbsolver/provenance.go | 342 +++++++ .../llbsolver}/provenance/buildconfig.go | 7 +- solver/llbsolver/provenance/capture.go | 250 +++++ solver/llbsolver/provenance/predicate.go | 248 +++++ solver/llbsolver/result.go | 75 +- solver/llbsolver/solver.go | 160 +++- solver/llbsolver/vertex.go | 60 +- solver/scheduler_test.go | 241 ++--- solver/types.go | 19 +- source/manager.go | 2 +- util/buildinfo/buildinfo.go | 123 ++- util/provenance/buildinfo.go | 164 ---- util/testutil/imageinfo.go | 13 + .../package-url/packageurl-go/.gitignore | 2 - .../package-url/packageurl-go/.golangci.yaml | 17 + .../package-url/packageurl-go/.travis.yml | 19 - .../packageurl-go/{mit.LICENSE => LICENSE} | 0 .../package-url/packageurl-go/README.md | 6 +- .../package-url/packageurl-go/packageurl.go | 86 +- vendor/modules.txt | 4 +- 47 files changed, 2610 insertions(+), 1348 deletions(-) create mode 100644 
frontend/dockerfile/dockerfile_provenance_test.go create mode 100644 solver/llbsolver/ops/opsutils/contenthash.go create mode 100644 solver/llbsolver/ops/opsutils/validate.go create mode 100644 solver/llbsolver/provenance.go rename {util => solver/llbsolver}/provenance/buildconfig.go (95%) create mode 100644 solver/llbsolver/provenance/capture.go create mode 100644 solver/llbsolver/provenance/predicate.go delete mode 100644 util/provenance/buildinfo.go create mode 100644 vendor/github.com/package-url/packageurl-go/.golangci.yaml delete mode 100644 vendor/github.com/package-url/packageurl-go/.travis.yml rename vendor/github.com/package-url/packageurl-go/{mit.LICENSE => LICENSE} (100%) diff --git a/client/build_test.go b/client/build_test.go index 4ce5523533a7..3e1109603df3 100644 --- a/client/build_test.go +++ b/client/build_test.go @@ -3,8 +3,6 @@ package client import ( "bytes" "context" - "encoding/base64" - "encoding/json" "fmt" "io" "os" @@ -16,7 +14,6 @@ import ( "time" "github.com/moby/buildkit/client/llb" - "github.com/moby/buildkit/exporter/containerimage/exptypes" "github.com/moby/buildkit/frontend/gateway/client" gatewayapi "github.com/moby/buildkit/frontend/gateway/pb" "github.com/moby/buildkit/identity" @@ -25,7 +22,6 @@ import ( "github.com/moby/buildkit/session/sshforward/sshprovider" "github.com/moby/buildkit/solver/errdefs" "github.com/moby/buildkit/solver/pb" - binfotypes "github.com/moby/buildkit/util/buildinfo/types" "github.com/moby/buildkit/util/entitlements" utilsystem "github.com/moby/buildkit/util/system" "github.com/moby/buildkit/util/testutil/echoserver" @@ -58,7 +54,6 @@ func TestClientGatewayIntegration(t *testing.T) { testClientGatewayContainerExtraHosts, testClientGatewayContainerSignal, testWarnings, - testClientGatewayFrontendAttrs, testClientGatewayNilResult, testClientGatewayEmptyImageExec, ), integration.WithMirroredImages(integration.OfficialImages("busybox:latest"))) @@ -1995,64 +1990,6 @@ func testClientGatewayContainerSignal(t 
*testing.T, sb integration.Sandbox) { checkAllReleasable(t, c, sb, true) } -// moby/buildkit#2476 -func testClientGatewayFrontendAttrs(t *testing.T, sb integration.Sandbox) { - requiresLinux(t) - c, err := New(sb.Context(), sb.Address()) - require.NoError(t, err) - defer c.Close() - - fooattrval := "bar" - bazattrval := "fuu" - - b := func(ctx context.Context, c client.Client) (*client.Result, error) { - st := llb.Image("busybox:latest").Run( - llb.ReadonlyRootFS(), - llb.Args([]string{"/bin/sh", "-c", `echo hello`}), - ) - def, err := st.Marshal(sb.Context()) - if err != nil { - return nil, err - } - res, err := c.Solve(ctx, client.SolveRequest{ - Definition: def.ToPB(), - FrontendOpt: map[string]string{ - "build-arg:foo": fooattrval, - }, - }) - require.NoError(t, err) - require.Contains(t, res.Metadata, exptypes.ExporterBuildInfo) - - var bi binfotypes.BuildInfo - require.NoError(t, json.Unmarshal(res.Metadata[exptypes.ExporterBuildInfo], &bi)) - require.Contains(t, bi.Attrs, "build-arg:foo") - bi.Attrs["build-arg:baz"] = &bazattrval - - bmbi, err := json.Marshal(bi) - require.NoError(t, err) - - res.AddMeta(exptypes.ExporterBuildInfo, bmbi) - return res, err - } - - res, err := c.Build(sb.Context(), SolveOpt{}, "", b, nil) - require.NoError(t, err) - - require.Contains(t, res.ExporterResponse, exptypes.ExporterBuildInfo) - decbi, err := base64.StdEncoding.DecodeString(res.ExporterResponse[exptypes.ExporterBuildInfo]) - require.NoError(t, err) - - var bi binfotypes.BuildInfo - require.NoError(t, json.Unmarshal(decbi, &bi)) - - require.Contains(t, bi.Attrs, "build-arg:foo") - require.Equal(t, &fooattrval, bi.Attrs["build-arg:foo"]) - require.Contains(t, bi.Attrs, "build-arg:baz") - require.Equal(t, &bazattrval, bi.Attrs["build-arg:baz"]) - - checkAllReleasable(t, c, sb, true) -} - func testClientGatewayNilResult(t *testing.T, sb integration.Sandbox) { requiresLinux(t) c, err := New(sb.Context(), sb.Address()) diff --git a/client/client_test.go 
b/client/client_test.go index 7cb205c716d3..137c20544cd8 100644 --- a/client/client_test.go +++ b/client/client_test.go @@ -6199,8 +6199,7 @@ func testBuildInfoExporter(t *testing.T, sb integration.Sandbox) { return nil, err } return c.Solve(ctx, gateway.SolveRequest{ - Definition: def.ToPB(), - FrontendOpt: map[string]string{"build-arg:foo": "bar"}, + Definition: def.ToPB(), }) } @@ -6233,8 +6232,6 @@ func testBuildInfoExporter(t *testing.T, sb integration.Sandbox) { err = json.Unmarshal(decbi, &exbi) require.NoError(t, err) - attrval := "bar" - require.Equal(t, exbi.Attrs, map[string]*string{"build-arg:foo": &attrval}) require.Equal(t, len(exbi.Sources), 1) require.Equal(t, exbi.Sources[0].Type, binfotypes.SourceTypeDockerImage) require.Equal(t, exbi.Sources[0].Ref, "docker.io/library/busybox:latest") @@ -6271,66 +6268,42 @@ func testBuildInfoInline(t *testing.T, sb integration.Sandbox) { ctx := namespaces.WithNamespace(sb.Context(), "buildkit") - for _, tt := range []struct { - name string - buildAttrs bool - }{{ - "attrsEnabled", - true, - }, { - "attrsDisabled", - false, - }} { - t.Run(tt.name, func(t *testing.T) { - target := registry + "/buildkit/test-buildinfo:latest" - - _, err = c.Solve(sb.Context(), def, SolveOpt{ - Exports: []ExportEntry{ - { - Type: ExporterImage, - Attrs: map[string]string{ - "name": target, - "push": "true", - "buildinfo-attrs": strconv.FormatBool(tt.buildAttrs), - }, - }, - }, - FrontendAttrs: map[string]string{ - "build-arg:foo": "bar", + target := registry + "/buildkit/test-buildinfo:latest" + + _, err = c.Solve(sb.Context(), def, SolveOpt{ + Exports: []ExportEntry{ + { + Type: ExporterImage, + Attrs: map[string]string{ + "name": target, + "push": "true", }, - }, nil) - require.NoError(t, err) + }, + }, + }, nil) + require.NoError(t, err) - img, err := client.GetImage(ctx, target) - require.NoError(t, err) + img, err := client.GetImage(ctx, target) + require.NoError(t, err) - desc, err := img.Config(ctx) - require.NoError(t, err) 
+ desc, err := img.Config(ctx) + require.NoError(t, err) - dt, err := content.ReadBlob(ctx, img.ContentStore(), desc) - require.NoError(t, err) + dt, err := content.ReadBlob(ctx, img.ContentStore(), desc) + require.NoError(t, err) - var config binfotypes.ImageConfig - require.NoError(t, json.Unmarshal(dt, &config)) + var config binfotypes.ImageConfig + require.NoError(t, json.Unmarshal(dt, &config)) - dec, err := base64.StdEncoding.DecodeString(config.BuildInfo) - require.NoError(t, err) + dec, err := base64.StdEncoding.DecodeString(config.BuildInfo) + require.NoError(t, err) - var bi binfotypes.BuildInfo - require.NoError(t, json.Unmarshal(dec, &bi)) + var bi binfotypes.BuildInfo + require.NoError(t, json.Unmarshal(dec, &bi)) - if tt.buildAttrs { - attrval := "bar" - require.Contains(t, bi.Attrs, "build-arg:foo") - require.Equal(t, bi.Attrs["build-arg:foo"], &attrval) - } else { - require.NotContains(t, bi.Attrs, "build-arg:foo") - } - require.Equal(t, len(bi.Sources), 1) - require.Equal(t, bi.Sources[0].Type, binfotypes.SourceTypeDockerImage) - require.Equal(t, bi.Sources[0].Ref, "docker.io/library/busybox:latest") - }) - } + require.Equal(t, len(bi.Sources), 1) + require.Equal(t, bi.Sources[0].Type, binfotypes.SourceTypeDockerImage) + require.Equal(t, bi.Sources[0].Ref, "docker.io/library/busybox:latest") } func testBuildInfoNoExport(t *testing.T, sb integration.Sandbox) { @@ -6348,8 +6321,7 @@ func testBuildInfoNoExport(t *testing.T, sb integration.Sandbox) { return nil, err } return c.Solve(ctx, gateway.SolveRequest{ - Definition: def.ToPB(), - FrontendOpt: map[string]string{"build-arg:foo": "bar"}, + Definition: def.ToPB(), }) } @@ -6364,8 +6336,6 @@ func testBuildInfoNoExport(t *testing.T, sb integration.Sandbox) { err = json.Unmarshal(decbi, &exbi) require.NoError(t, err) - attrval := "bar" - require.Equal(t, exbi.Attrs, map[string]*string{"build-arg:foo": &attrval}) require.Equal(t, len(exbi.Sources), 1) require.Equal(t, exbi.Sources[0].Type, 
binfotypes.SourceTypeDockerImage) require.Equal(t, exbi.Sources[0].Ref, "docker.io/library/busybox:latest") diff --git a/control/control.go b/control/control.go index 1fb35eb5a909..591dac90b6d6 100644 --- a/control/control.go +++ b/control/control.go @@ -330,6 +330,11 @@ func (c *Controller) Solve(ctx context.Context, req *controlapi.SolveRequest) (* } var procs []llbsolver.Processor + + if len(attests) > 0 { + procs = append(procs, proc.ForceRefsProcessor) + } + if attrs, ok := attests["sbom"]; ok { src := attrs["generator"] if src == "" { @@ -340,11 +345,11 @@ func (c *Controller) Solve(ctx context.Context, req *controlapi.SolveRequest) (* return nil, errors.Wrapf(err, "failed to parse sbom generator %s", src) } ref = reference.TagNameOnly(ref) - procs = append(procs, proc.ForceRefsProcessor, proc.SBOMProcessor(ref.String())) + procs = append(procs, proc.SBOMProcessor(ref.String())) } if attrs, ok := attests["provenance"]; ok { - procs = append(procs, proc.ForceRefsProcessor, proc.ProvenanceProcessor(attrs)) + procs = append(procs, proc.ProvenanceProcessor(attrs)) } resp, err := c.solver.Solve(ctx, req.Ref, req.Session, frontend.SolveRequest{ diff --git a/examples/dockerfile2llb/main.go b/examples/dockerfile2llb/main.go index 224aee47f898..b575765a41f0 100644 --- a/examples/dockerfile2llb/main.go +++ b/examples/dockerfile2llb/main.go @@ -41,7 +41,7 @@ func xmain() error { caps := pb.Caps.CapSet(pb.Caps.All()) - state, img, bi, err := dockerfile2llb.Dockerfile2LLB(appcontext.Context(), df, dockerfile2llb.ConvertOpt{ + state, img, err := dockerfile2llb.Dockerfile2LLB(appcontext.Context(), df, dockerfile2llb.ConvertOpt{ MetaResolver: imagemetaresolver.Default(), Target: opt.target, LLBCaps: &caps, @@ -62,11 +62,6 @@ func xmain() error { return err } } - if opt.partialMetadataFile != "" { - if err := writeJSON(opt.partialMetadataFile, bi); err != nil { - return err - } - } return nil } diff --git a/frontend/attestations/parse.go b/frontend/attestations/parse.go index 
b3eb35c07616..b5740a964aaa 100644 --- a/frontend/attestations/parse.go +++ b/frontend/attestations/parse.go @@ -32,6 +32,15 @@ func Filter(v map[string]string) map[string]string { return attests } +func Validate(values map[string]map[string]string) (map[string]map[string]string, error) { + for k := range values { + if k != KeyTypeSbom && k != KeyTypeProvenance { + return nil, errors.Errorf("unknown attestation type %q", k) + } + } + return values, nil +} + func Parse(values map[string]string) (map[string]map[string]string, error) { attests := make(map[string]string) for k, v := range values { @@ -68,5 +77,6 @@ func Parse(values map[string]string) (map[string]map[string]string, error) { attrs[parts[0]] = parts[1] } } - return out, nil + + return Validate(out) } diff --git a/frontend/dockerfile/builder/build.go b/frontend/dockerfile/builder/build.go index dd318136e2f3..81c690d0aa20 100644 --- a/frontend/dockerfile/builder/build.go +++ b/frontend/dockerfile/builder/build.go @@ -32,7 +32,6 @@ import ( "github.com/moby/buildkit/frontend/subrequests/targets" "github.com/moby/buildkit/solver/errdefs" "github.com/moby/buildkit/solver/pb" - binfotypes "github.com/moby/buildkit/util/buildinfo/types" "github.com/moby/buildkit/util/gitutil" digest "github.com/opencontainers/go-digest" ocispecs "github.com/opencontainers/image-spec/specs-go/v1" @@ -524,7 +523,7 @@ func Build(ctx context.Context, c client.Client) (_ *client.Result, err error) { opt.Warn = nil } opt.ContextByName = contextByNameFunc(c, c.BuildOpts().SessionID) - st, img, bi, err := dockerfile2llb.Dockerfile2LLB(ctx2, dtDockerfile, opt) + st, img, err := dockerfile2llb.Dockerfile2LLB(ctx2, dtDockerfile, opt) if err != nil { return err @@ -579,11 +578,6 @@ func Build(ctx context.Context, c client.Client) (_ *client.Result, err error) { return err } - buildinfo, err := json.Marshal(bi) - if err != nil { - return errors.Wrapf(err, "failed to marshal build info") - } - p := platforms.DefaultSpec() if tp != nil { p = 
*tp @@ -593,7 +587,6 @@ func Build(ctx context.Context, c client.Client) (_ *client.Result, err error) { if !exportMap { res.AddMeta(exptypes.ExporterImageConfigKey, config) - res.AddMeta(exptypes.ExporterBuildInfo, buildinfo) res.SetRef(ref) expPlatforms.Platforms[i] = exptypes.Platform{ @@ -602,7 +595,6 @@ func Build(ctx context.Context, c client.Client) (_ *client.Result, err error) { } } else { res.AddMeta(fmt.Sprintf("%s/%s", exptypes.ExporterImageConfigKey, k), config) - res.AddMeta(fmt.Sprintf("%s/%s", exptypes.ExporterBuildInfo, k), buildinfo) res.AddRef(k, ref) expPlatforms.Platforms[i] = exptypes.Platform{ ID: k, @@ -871,11 +863,11 @@ func warnOpts(sm *llb.SourceMap, r *parser.Range, detail [][]byte, url string) c return opts } -func contextByNameFunc(c client.Client, sessionID string) func(context.Context, string, string, *ocispecs.Platform) (*llb.State, *dockerfile2llb.Image, *binfotypes.BuildInfo, error) { - return func(ctx context.Context, name, resolveMode string, p *ocispecs.Platform) (*llb.State, *dockerfile2llb.Image, *binfotypes.BuildInfo, error) { +func contextByNameFunc(c client.Client, sessionID string) func(context.Context, string, string, *ocispecs.Platform) (*llb.State, *dockerfile2llb.Image, error) { + return func(ctx context.Context, name, resolveMode string, p *ocispecs.Platform) (*llb.State, *dockerfile2llb.Image, error) { named, err := reference.ParseNormalizedNamed(name) if err != nil { - return nil, nil, nil, errors.Wrapf(err, "invalid context name %s", name) + return nil, nil, errors.Wrapf(err, "invalid context name %s", name) } name = strings.TrimSuffix(reference.FamiliarString(named), ":latest") @@ -885,28 +877,28 @@ func contextByNameFunc(c client.Client, sessionID string) func(context.Context, } if p != nil { name := name + "::" + platforms.Format(platforms.Normalize(*p)) - st, img, bi, err := contextByName(ctx, c, sessionID, name, p, resolveMode) + st, img, err := contextByName(ctx, c, sessionID, name, p, resolveMode) if err != 
nil { - return nil, nil, nil, err + return nil, nil, err } if st != nil { - return st, img, bi, nil + return st, img, nil } } return contextByName(ctx, c, sessionID, name, p, resolveMode) } } -func contextByName(ctx context.Context, c client.Client, sessionID, name string, platform *ocispecs.Platform, resolveMode string) (*llb.State, *dockerfile2llb.Image, *binfotypes.BuildInfo, error) { +func contextByName(ctx context.Context, c client.Client, sessionID, name string, platform *ocispecs.Platform, resolveMode string) (*llb.State, *dockerfile2llb.Image, error) { opts := c.BuildOpts().Opts v, ok := opts[contextPrefix+name] if !ok { - return nil, nil, nil, nil + return nil, nil, nil } vv := strings.SplitN(v, ":", 2) if len(vv) != 2 { - return nil, nil, nil, errors.Errorf("invalid context specifier %s for %s", v, name) + return nil, nil, errors.Errorf("invalid context specifier %s for %s", v, name) } // allow git@ without protocol for SSH URLs for backwards compatibility if strings.HasPrefix(vv[0], "git@") { @@ -917,7 +909,7 @@ func contextByName(ctx context.Context, c client.Client, sessionID, name string, ref := strings.TrimPrefix(vv[1], "//") if ref == "scratch" { st := llb.Scratch() - return &st, nil, nil, nil + return &st, nil, nil } imgOpt := []llb.ImageOption{ @@ -929,7 +921,7 @@ func contextByName(ctx context.Context, c client.Client, sessionID, name string, named, err := reference.ParseNormalizedNamed(ref) if err != nil { - return nil, nil, nil, err + return nil, nil, err } named = reference.TagNameOnly(named) @@ -942,45 +934,45 @@ func contextByName(ctx context.Context, c client.Client, sessionID, name string, SessionID: sessionID, }) if err != nil { - return nil, nil, nil, err + return nil, nil, err } var img dockerfile2llb.Image if err := json.Unmarshal(data, &img); err != nil { - return nil, nil, nil, err + return nil, nil, err } img.Created = nil st := llb.Image(ref, imgOpt...) 
st, err = st.WithImageConfig(data) if err != nil { - return nil, nil, nil, err + return nil, nil, err } - return &st, &img, nil, nil + return &st, &img, nil case "git": st, ok := detectGitContext(v, true) if !ok { - return nil, nil, nil, errors.Errorf("invalid git context %s", v) + return nil, nil, errors.Errorf("invalid git context %s", v) } - return st, nil, nil, nil + return st, nil, nil case "http", "https": st, ok := detectGitContext(v, true) if !ok { httpst := llb.HTTP(v, llb.WithCustomName("[context "+name+"] "+v)) st = &httpst } - return st, nil, nil, nil + return st, nil, nil case "oci-layout": ref := strings.TrimPrefix(vv[1], "//") // expected format is storeID@hash parts := strings.SplitN(ref, "@", 2) if len(parts) != 2 { - return nil, nil, nil, errors.Errorf("invalid oci-layout format '%s', must be oci-layout:///content-store@sha256:digest", vv[1]) + return nil, nil, errors.Errorf("invalid oci-layout format '%s', must be oci-layout:///content-store@sha256:digest", vv[1]) } storeID := parts[0] dig, err := digest.Parse(parts[1]) if err != nil { - return nil, nil, nil, errors.Errorf("invalid digest format '%s', must be oci-layout:///content-store@sha256:digest", vv[1]) + return nil, nil, errors.Errorf("invalid digest format '%s', must be oci-layout:///content-store@sha256:digest", vv[1]) } // the ref now is "content-store@sha256:digest" @@ -1001,12 +993,12 @@ func contextByName(ctx context.Context, c client.Client, sessionID, name string, ResolverType: llb.ResolverTypeOCILayout, }) if err != nil { - return nil, nil, nil, err + return nil, nil, err } var img dockerfile2llb.Image if err := json.Unmarshal(data, &img); err != nil { - return nil, nil, nil, err + return nil, nil, err } st := llb.OCILayout(storeID, dig, @@ -1015,9 +1007,9 @@ func contextByName(ctx context.Context, c client.Client, sessionID, name string, ) st, err = st.WithImageConfig(data) if err != nil { - return nil, nil, nil, err + return nil, nil, err } - return &st, &img, nil, nil + return 
&st, &img, nil case "local": st := llb.Local(vv[1], llb.SessionID(c.BuildOpts().SessionID), @@ -1028,18 +1020,18 @@ func contextByName(ctx context.Context, c client.Client, sessionID, name string, ) def, err := st.Marshal(ctx) if err != nil { - return nil, nil, nil, err + return nil, nil, err } res, err := c.Solve(ctx, client.SolveRequest{ Evaluate: true, Definition: def.ToPB(), }) if err != nil { - return nil, nil, nil, err + return nil, nil, err } ref, err := res.SingleRef() if err != nil { - return nil, nil, nil, err + return nil, nil, err } dt, _ := ref.ReadFile(ctx, client.ReadRequest{ Filename: dockerignoreFilename, @@ -1048,7 +1040,7 @@ func contextByName(ctx context.Context, c client.Client, sessionID, name string, if len(dt) != 0 { excludes, err = dockerignore.ReadAll(bytes.NewBuffer(dt)) if err != nil { - return nil, nil, nil, err + return nil, nil, err } } st = llb.Local(vv[1], @@ -1057,49 +1049,37 @@ func contextByName(ctx context.Context, c client.Client, sessionID, name string, llb.SharedKeyHint("context:"+name), llb.ExcludePatterns(excludes), ) - return &st, nil, nil, nil + return &st, nil, nil case "input": inputs, err := c.Inputs(ctx) if err != nil { - return nil, nil, nil, err + return nil, nil, err } st, ok := inputs[vv[1]] if !ok { - return nil, nil, nil, errors.Errorf("invalid input %s for %s", vv[1], name) + return nil, nil, errors.Errorf("invalid input %s for %s", vv[1], name) } md, ok := opts[inputMetadataPrefix+vv[1]] if ok { m := make(map[string][]byte) if err := json.Unmarshal([]byte(md), &m); err != nil { - return nil, nil, nil, errors.Wrapf(err, "failed to parse input metadata %s", md) - } - var bi *binfotypes.BuildInfo - if dtbi, ok := m[exptypes.ExporterBuildInfo]; ok { - var depbi binfotypes.BuildInfo - if err := json.Unmarshal(dtbi, &depbi); err != nil { - return nil, nil, nil, errors.Wrapf(err, "failed to parse buildinfo for %s", name) - } - bi = &binfotypes.BuildInfo{ - Deps: map[string]binfotypes.BuildInfo{ - 
strings.SplitN(vv[1], "::", 2)[0]: depbi, - }, - } + return nil, nil, errors.Wrapf(err, "failed to parse input metadata %s", md) } var img *dockerfile2llb.Image if dtic, ok := m[exptypes.ExporterImageConfigKey]; ok { st, err = st.WithImageConfig(dtic) if err != nil { - return nil, nil, nil, err + return nil, nil, err } if err := json.Unmarshal(dtic, &img); err != nil { - return nil, nil, nil, errors.Wrapf(err, "failed to parse image config for %s", name) + return nil, nil, errors.Wrapf(err, "failed to parse image config for %s", name) } } - return &st, img, bi, nil + return &st, img, nil } - return &st, nil, nil, nil + return &st, nil, nil default: - return nil, nil, nil, errors.Errorf("unsupported context source %s for %s", vv[0], name) + return nil, nil, errors.Errorf("unsupported context source %s for %s", vv[0], name) } } diff --git a/frontend/dockerfile/dockerfile2llb/convert.go b/frontend/dockerfile/dockerfile2llb/convert.go index 427d13341569..e41a89c05dd2 100644 --- a/frontend/dockerfile/dockerfile2llb/convert.go +++ b/frontend/dockerfile/dockerfile2llb/convert.go @@ -13,7 +13,6 @@ import ( "sort" "strconv" "strings" - "sync" "time" "github.com/containerd/containerd/platforms" @@ -75,19 +74,19 @@ type ConvertOpt struct { Hostname string SourceDateEpoch *time.Time Warn func(short, url string, detail [][]byte, location *parser.Range) - ContextByName func(ctx context.Context, name, resolveMode string, p *ocispecs.Platform) (*llb.State, *Image, *binfotypes.BuildInfo, error) + ContextByName func(ctx context.Context, name, resolveMode string, p *ocispecs.Platform) (*llb.State, *Image, error) } -func Dockerfile2LLB(ctx context.Context, dt []byte, opt ConvertOpt) (*llb.State, *Image, *binfotypes.BuildInfo, error) { - ds, bi, err := toDispatchState(ctx, dt, opt) +func Dockerfile2LLB(ctx context.Context, dt []byte, opt ConvertOpt) (*llb.State, *Image, error) { + ds, err := toDispatchState(ctx, dt, opt) if err != nil { - return nil, nil, nil, err + return nil, nil, 
err } - return &ds.state, &ds.image, bi, nil + return &ds.state, &ds.image, nil } func Dockefile2Outline(ctx context.Context, dt []byte, opt ConvertOpt) (*outline.Outline, error) { - ds, _, err := toDispatchState(ctx, dt, opt) + ds, err := toDispatchState(ctx, dt, opt) if err != nil { return nil, err } @@ -123,38 +122,26 @@ func ListTargets(ctx context.Context, dt []byte) (*targets.List, error) { return l, nil } -func toDispatchState(ctx context.Context, dt []byte, opt ConvertOpt) (*dispatchState, *binfotypes.BuildInfo, error) { - buildInfo := &binfotypes.BuildInfo{} - buildInfoDepsMu := sync.Mutex{} +func toDispatchState(ctx context.Context, dt []byte, opt ConvertOpt) (*dispatchState, error) { contextByName := opt.ContextByName - opt.ContextByName = func(ctx context.Context, name, resolveMode string, p *ocispecs.Platform) (*llb.State, *Image, *binfotypes.BuildInfo, error) { + opt.ContextByName = func(ctx context.Context, name, resolveMode string, p *ocispecs.Platform) (*llb.State, *Image, error) { if !strings.EqualFold(name, "scratch") && !strings.EqualFold(name, "context") { if contextByName != nil { if p == nil { p = opt.TargetPlatform } - st, img, bi, err := contextByName(ctx, name, resolveMode, p) + st, img, err := contextByName(ctx, name, resolveMode, p) if err != nil { - return nil, nil, nil, err + return nil, nil, err } - if bi != nil && bi.Deps != nil { - buildInfoDepsMu.Lock() - if buildInfo.Deps == nil { - buildInfo.Deps = make(map[string]binfotypes.BuildInfo) - } - for k := range bi.Deps { - buildInfo.Deps[k] = bi.Deps[k] - } - buildInfoDepsMu.Unlock() - } - return st, img, bi, nil + return st, img, nil } } - return nil, nil, nil, nil + return nil, nil, nil } if len(dt) == 0 { - return nil, nil, errors.Errorf("the Dockerfile cannot be empty") + return nil, errors.Errorf("the Dockerfile cannot be empty") } if opt.ContextLocalName == "" { @@ -170,7 +157,7 @@ func toDispatchState(ctx context.Context, dt []byte, opt ConvertOpt) (*dispatchS dockerfile, err 
:= parser.Parse(bytes.NewReader(dt)) if err != nil { - return nil, nil, err + return nil, err } for _, w := range dockerfile.Warnings { @@ -181,7 +168,7 @@ func toDispatchState(ctx context.Context, dt []byte, opt ConvertOpt) (*dispatchS stages, metaArgs, err := instructions.Parse(dockerfile.AST) if err != nil { - return nil, nil, err + return nil, err } shlex := shell.NewLex(dockerfile.EscapeToken) @@ -216,10 +203,10 @@ func toDispatchState(ctx context.Context, dt []byte, opt ConvertOpt) (*dispatchS for i, st := range stages { name, used, err := shlex.ProcessWordWithMatches(st.BaseName, metaArgsToMap(optMetaArgs)) if err != nil { - return nil, nil, parser.WithLocation(err, st.Location) + return nil, parser.WithLocation(err, st.Location) } if name == "" { - return nil, nil, parser.WithLocation(errors.Errorf("base name (%s) should not be blank", st.BaseName), st.Location) + return nil, parser.WithLocation(errors.Errorf("base name (%s) should not be blank", st.BaseName), st.Location) } st.BaseName = name @@ -236,12 +223,12 @@ func toDispatchState(ctx context.Context, dt []byte, opt ConvertOpt) (*dispatchS if v := st.Platform; v != "" { v, u, err := shlex.ProcessWordWithMatches(v, metaArgsToMap(optMetaArgs)) if err != nil { - return nil, nil, parser.WithLocation(errors.Wrapf(err, "failed to process arguments for platform %s", v), st.Location) + return nil, parser.WithLocation(errors.Wrapf(err, "failed to process arguments for platform %s", v), st.Location) } p, err := platforms.Parse(v) if err != nil { - return nil, nil, parser.WithLocation(errors.Wrapf(err, "failed to parse platform %s", v), st.Location) + return nil, parser.WithLocation(errors.Wrapf(err, "failed to parse platform %s", v), st.Location) } for k := range u { used[k] = struct{}{} @@ -250,9 +237,9 @@ func toDispatchState(ctx context.Context, dt []byte, opt ConvertOpt) (*dispatchS } if st.Name != "" { - s, img, bi, err := opt.ContextByName(ctx, st.Name, opt.ImageResolveMode.String(), ds.platform) + s, img, 
err := opt.ContextByName(ctx, st.Name, opt.ImageResolveMode.String(), ds.platform) if err != nil { - return nil, nil, err + return nil, err } if s != nil { ds.noinit = true @@ -267,9 +254,6 @@ func toDispatchState(ctx context.Context, dt []byte, opt ConvertOpt) (*dispatchS } } } - if bi != nil { - ds.buildInfo = *bi - } allDispatchStates.addState(ds) continue } @@ -319,7 +303,7 @@ func toDispatchState(ctx context.Context, dt []byte, opt ConvertOpt) (*dispatchS var ok bool target, ok = allDispatchStates.findStateByName(opt.Target) if !ok { - return nil, nil, errors.Errorf("target stage %s could not be found", opt.Target) + return nil, errors.Errorf("target stage %s could not be found", opt.Target) } } @@ -329,7 +313,7 @@ func toDispatchState(ctx context.Context, dt []byte, opt ConvertOpt) (*dispatchS for i, cmd := range d.stage.Commands { newCmd, err := toCommand(cmd, allDispatchStates) if err != nil { - return nil, nil, err + return nil, err } d.commands[i] = newCmd for _, src := range newCmd.sources { @@ -344,7 +328,7 @@ func toDispatchState(ctx context.Context, dt []byte, opt ConvertOpt) (*dispatchS } if has, state := hasCircularDependency(allDispatchStates.states); has { - return nil, nil, errors.Errorf("circular dependency detected on stage: %s", state.stageName) + return nil, errors.Errorf("circular dependency detected on stage: %s", state.stageName) } if len(allDispatchStates.states) == 1 { @@ -387,7 +371,7 @@ func toDispatchState(ctx context.Context, dt []byte, opt ConvertOpt) (*dispatchS d.stage.BaseName = reference.TagNameOnly(ref).String() var isScratch bool - st, img, bi, err := opt.ContextByName(ctx, d.stage.BaseName, opt.ImageResolveMode.String(), platform) + st, img, err := opt.ContextByName(ctx, d.stage.BaseName, opt.ImageResolveMode.String(), platform) if err != nil { return err } @@ -397,9 +381,6 @@ func toDispatchState(ctx context.Context, dt []byte, opt ConvertOpt) (*dispatchS } else { d.image = emptyImage(platformOpt.targetPlatform) } - if bi != 
nil { - d.buildInfo = *bi - } d.state = st.Platform(*platform) d.platform = platform return nil @@ -477,7 +458,7 @@ func toDispatchState(ctx context.Context, dt []byte, opt ConvertOpt) (*dispatchS } if err := eg.Wait(); err != nil { - return nil, nil, err + return nil, err } buildContext := &mutableOutput{} @@ -488,19 +469,6 @@ func toDispatchState(ctx context.Context, dt []byte, opt ConvertOpt) (*dispatchS continue } - // collect build sources and dependencies - if len(d.buildInfo.Sources) > 0 { - buildInfo.Sources = append(buildInfo.Sources, d.buildInfo.Sources...) - } - if d.buildInfo.Deps != nil { - for name, bi := range d.buildInfo.Deps { - if buildInfo.Deps == nil { - buildInfo.Deps = make(map[string]binfotypes.BuildInfo) - } - buildInfo.Deps[name] = bi - } - } - if d.base != nil { d.state = d.base.state d.platform = d.base.platform @@ -509,11 +477,11 @@ func toDispatchState(ctx context.Context, dt []byte, opt ConvertOpt) (*dispatchS // make sure that PATH is always set if _, ok := shell.BuildEnvs(d.image.Config.Env)["PATH"]; !ok { - var os string + var pathOS string if d.platform != nil { - os = d.platform.OS + pathOS = d.platform.OS } - d.image.Config.Env = append(d.image.Config.Env, "PATH="+system.DefaultPathEnv(os)) + d.image.Config.Env = append(d.image.Config.Env, "PATH="+system.DefaultPathEnv(pathOS)) } // initialize base metadata from image conf @@ -526,12 +494,12 @@ func toDispatchState(ctx context.Context, dt []byte, opt ConvertOpt) (*dispatchS } if d.image.Config.WorkingDir != "" { if err = dispatchWorkdir(d, &instructions.WorkdirCommand{Path: d.image.Config.WorkingDir}, false, nil); err != nil { - return nil, nil, parser.WithLocation(err, d.stage.Location) + return nil, parser.WithLocation(err, d.stage.Location) } } if d.image.Config.User != "" { if err = dispatchUser(d, &instructions.UserCommand{User: d.image.Config.User}, false); err != nil { - return nil, nil, parser.WithLocation(err, d.stage.Location) + return nil, parser.WithLocation(err, 
d.stage.Location) } } d.state = d.state.Network(opt.ForceNetMode) @@ -556,13 +524,13 @@ func toDispatchState(ctx context.Context, dt []byte, opt ConvertOpt) (*dispatchS } if err = dispatchOnBuildTriggers(d, d.image.Config.OnBuild, opt); err != nil { - return nil, nil, parser.WithLocation(err, d.stage.Location) + return nil, parser.WithLocation(err, d.stage.Location) } d.image.Config.OnBuild = nil for _, cmd := range d.commands { if err := dispatch(d, cmd, opt); err != nil { - return nil, nil, parser.WithLocation(err, cmd.Location()) + return nil, parser.WithLocation(err, cmd.Location()) } } @@ -571,13 +539,6 @@ func toDispatchState(ctx context.Context, dt []byte, opt ConvertOpt) (*dispatchS } } - // sort build sources - if len(buildInfo.Sources) > 0 { - sort.Slice(buildInfo.Sources, func(i, j int) bool { - return buildInfo.Sources[i].Ref < buildInfo.Sources[j].Ref - }) - } - if len(opt.Labels) != 0 && target.image.Config.Labels == nil { target.image.Config.Labels = make(map[string]string, len(opt.Labels)) } @@ -615,7 +576,7 @@ func toDispatchState(ctx context.Context, dt []byte, opt ConvertOpt) (*dispatchS target.image.Variant = platformOpt.targetPlatform.Variant } - return target, buildInfo, nil + return target, nil } func metaArgsToMap(metaArgs []instructions.KeyValuePairOptional) map[string]string { diff --git a/frontend/dockerfile/dockerfile2llb/convert_test.go b/frontend/dockerfile/dockerfile2llb/convert_test.go index 5c1817addf91..8fe1aa6e2ad8 100644 --- a/frontend/dockerfile/dockerfile2llb/convert_test.go +++ b/frontend/dockerfile/dockerfile2llb/convert_test.go @@ -1,16 +1,12 @@ package dockerfile2llb import ( - "strings" "testing" "github.com/moby/buildkit/frontend/dockerfile/instructions" "github.com/moby/buildkit/frontend/dockerfile/shell" "github.com/moby/buildkit/util/appcontext" - binfotypes "github.com/moby/buildkit/util/buildinfo/types" - ocispecs "github.com/opencontainers/image-spec/specs-go/v1" "github.com/stretchr/testify/assert" - 
"github.com/stretchr/testify/require" ) func toEnvMap(args []instructions.KeyValuePairOptional, env []string) map[string]string { @@ -35,7 +31,7 @@ ENV FOO bar COPY f1 f2 /sub/ RUN ls -l ` - _, _, _, err := Dockerfile2LLB(appcontext.Context(), []byte(df), ConvertOpt{}) + _, _, err := Dockerfile2LLB(appcontext.Context(), []byte(df), ConvertOpt{}) assert.NoError(t, err) df = `FROM scratch AS foo @@ -44,7 +40,7 @@ FROM foo COPY --from=foo f1 / COPY --from=0 f2 / ` - _, _, _, err = Dockerfile2LLB(appcontext.Context(), []byte(df), ConvertOpt{}) + _, _, err = Dockerfile2LLB(appcontext.Context(), []byte(df), ConvertOpt{}) assert.NoError(t, err) df = `FROM scratch AS foo @@ -53,12 +49,12 @@ FROM foo COPY --from=foo f1 / COPY --from=0 f2 / ` - _, _, _, err = Dockerfile2LLB(appcontext.Context(), []byte(df), ConvertOpt{ + _, _, err = Dockerfile2LLB(appcontext.Context(), []byte(df), ConvertOpt{ Target: "Foo", }) assert.NoError(t, err) - _, _, _, err = Dockerfile2LLB(appcontext.Context(), []byte(df), ConvertOpt{ + _, _, err = Dockerfile2LLB(appcontext.Context(), []byte(df), ConvertOpt{ Target: "nosuch", }) assert.Error(t, err) @@ -66,21 +62,21 @@ COPY --from=0 f2 / df = `FROM scratch ADD http://github.com/moby/buildkit/blob/master/README.md / ` - _, _, _, err = Dockerfile2LLB(appcontext.Context(), []byte(df), ConvertOpt{}) + _, _, err = Dockerfile2LLB(appcontext.Context(), []byte(df), ConvertOpt{}) assert.NoError(t, err) df = `FROM scratch COPY http://github.com/moby/buildkit/blob/master/README.md / ` - _, _, _, err = Dockerfile2LLB(appcontext.Context(), []byte(df), ConvertOpt{}) + _, _, err = Dockerfile2LLB(appcontext.Context(), []byte(df), ConvertOpt{}) assert.EqualError(t, err, "source can't be a URL for COPY") df = `FROM "" AS foo` - _, _, _, err = Dockerfile2LLB(appcontext.Context(), []byte(df), ConvertOpt{}) + _, _, err = Dockerfile2LLB(appcontext.Context(), []byte(df), ConvertOpt{}) assert.Error(t, err) df = `FROM ${BLANK} AS foo` - _, _, _, err = 
Dockerfile2LLB(appcontext.Context(), []byte(df), ConvertOpt{}) + _, _, err = Dockerfile2LLB(appcontext.Context(), []byte(df), ConvertOpt{}) assert.Error(t, err) } @@ -178,7 +174,7 @@ func TestDockerfileCircularDependencies(t *testing.T) { df := `FROM busybox AS stage0 COPY --from=stage0 f1 /sub/ ` - _, _, _, err := Dockerfile2LLB(appcontext.Context(), []byte(df), ConvertOpt{}) + _, _, err := Dockerfile2LLB(appcontext.Context(), []byte(df), ConvertOpt{}) assert.EqualError(t, err, "circular dependency detected on stage: stage0") // multiple stages with circular dependency @@ -189,33 +185,6 @@ COPY --from=stage0 f2 /sub/ FROM busybox AS stage2 COPY --from=stage1 f2 /sub/ ` - _, _, _, err = Dockerfile2LLB(appcontext.Context(), []byte(df), ConvertOpt{}) + _, _, err = Dockerfile2LLB(appcontext.Context(), []byte(df), ConvertOpt{}) assert.EqualError(t, err, "circular dependency detected on stage: stage0") } - -// moby/buildkit#2311 -func TestTargetBuildInfo(t *testing.T) { - df := ` -FROM busybox -ADD https://raw.githubusercontent.com/moby/buildkit/master/README.md / -` - _, _, bi, err := Dockerfile2LLB(appcontext.Context(), []byte(df), ConvertOpt{ - TargetPlatform: &ocispecs.Platform{ - Architecture: "amd64", - OS: "linux", - }, - BuildPlatforms: []ocispecs.Platform{ - { - Architecture: "amd64", - OS: "linux", - }, - }, - }) - require.NoError(t, err) - - require.Equal(t, 1, len(bi.Sources)) - assert.Equal(t, binfotypes.SourceTypeDockerImage, bi.Sources[0].Type) - assert.Equal(t, "busybox", bi.Sources[0].Ref) - assert.True(t, strings.HasPrefix(bi.Sources[0].Alias, "docker.io/library/busybox@")) - assert.NotEmpty(t, bi.Sources[0].Pin) -} diff --git a/frontend/dockerfile/dockerfile_buildinfo_test.go b/frontend/dockerfile/dockerfile_buildinfo_test.go index 4d68462a0f62..ac9460d2b66e 100644 --- a/frontend/dockerfile/dockerfile_buildinfo_test.go +++ b/frontend/dockerfile/dockerfile_buildinfo_test.go @@ -112,9 +112,16 @@ COPY --from=alpine /bin/busybox /alpine-busybox 
require.Contains(t, bi.Attrs, "context") require.Equal(t, server.URL+"/.git#buildinfo", *bi.Attrs["context"]) - sources := bi.Sources - require.Equal(t, 3, len(sources)) + _, isGateway := f.(*gatewayFrontend) + sources := bi.Sources + if isGateway { + require.Equal(t, 5, len(sources), "%+v", sources) + assert.Equal(t, binfotypes.SourceTypeDockerImage, sources[0].Type) + assert.Contains(t, sources[0].Ref, "buildkit_test") + sources = sources[1:] + } + require.Equal(t, 4, len(sources), "%+v", sources) assert.Equal(t, binfotypes.SourceTypeDockerImage, sources[0].Type) assert.Equal(t, "docker.io/library/alpine:latest@sha256:21a3deaa0d32a8057914f36584b5288d2e5ecc984380bc0118285c70fa8c9300", sources[0].Ref) assert.Equal(t, "sha256:21a3deaa0d32a8057914f36584b5288d2e5ecc984380bc0118285c70fa8c9300", sources[0].Pin) @@ -123,9 +130,13 @@ COPY --from=alpine /bin/busybox /alpine-busybox assert.Equal(t, "docker.io/library/busybox:latest", sources[1].Ref) assert.NotEmpty(t, sources[1].Pin) - assert.Equal(t, binfotypes.SourceTypeHTTP, sources[2].Type) - assert.Equal(t, "https://raw.githubusercontent.com/moby/moby/v20.10.21/README.md", sources[2].Ref) - assert.Equal(t, "sha256:419455202b0ef97e480d7f8199b26a721a417818bc0e2d106975f74323f25e6c", sources[2].Pin) + assert.Equal(t, binfotypes.SourceTypeGit, sources[2].Type) + assert.Equal(t, server.URL+"/.git#buildinfo", sources[2].Ref) + assert.NotEmpty(t, sources[2].Pin) + + assert.Equal(t, binfotypes.SourceTypeHTTP, sources[3].Type) + assert.Equal(t, "https://raw.githubusercontent.com/moby/moby/v20.10.21/README.md", sources[3].Ref) + assert.Equal(t, "sha256:419455202b0ef97e480d7f8199b26a721a417818bc0e2d106975f74323f25e6c", sources[3].Pin) } func testBuildInfoSourcesNoop(t *testing.T, sb integration.Sandbox) { @@ -180,8 +191,12 @@ FROM busybox:latest require.NoError(t, err) sources := bi.Sources - require.Equal(t, 1, len(sources)) + if _, isGateway := f.(*gatewayFrontend); isGateway { + require.Equal(t, 2, len(sources), "%+v", sources) 
+ sources = sources[1:] + } + require.Equal(t, 1, len(sources)) assert.Equal(t, binfotypes.SourceTypeDockerImage, sources[0].Type) assert.Equal(t, "docker.io/library/busybox:latest", sources[0].Ref) assert.NotEmpty(t, sources[0].Pin) @@ -303,8 +318,14 @@ ADD https://raw.githubusercontent.com/moby/moby/v20.10.21/README.md / require.Contains(t, bi.Attrs, "build-arg:foo") require.Equal(t, "bar", *bi.Attrs["build-arg:foo"]) + _, isGateway := f.(*gatewayFrontend) + sources := bi.Sources - require.Equal(t, 2, len(sources)) + if isGateway { + require.Equal(t, 3, len(sources), "%+v", sources) + sources = sources[1:] + } + require.Equal(t, 2, len(sources), "%+v", sources) assert.Equal(t, binfotypes.SourceTypeDockerImage, sources[0].Type) assert.Equal(t, "docker.io/library/busybox:latest", sources[0].Ref) @@ -379,8 +400,15 @@ COPY --from=base /out / require.Contains(t, bi.Attrs, "build-arg:foo") require.Equal(t, "bar", *bi.Attrs["build-arg:foo"]) + _, isGateway := f.(*gatewayFrontend) + sources := bi.Sources - require.Equal(t, 1, len(sources)) + if isGateway { + require.Equal(t, 2, len(sources), "%+v", sources) + sources = sources[1:] + } else { + require.Equal(t, 1, len(sources)) + } assert.Equal(t, binfotypes.SourceTypeDockerImage, sources[0].Type) assert.Equal(t, "docker.io/library/alpine:latest", sources[0].Ref) assert.NotEmpty(t, sources[0].Pin) @@ -460,7 +488,12 @@ COPY --from=base /o* / require.Contains(t, bi.Attrs, "build-arg:foo") require.Equal(t, "bar", *bi.Attrs["build-arg:foo"]) - require.Equal(t, 0, len(bi.Sources)) + _, isGateway := f.(*gatewayFrontend) + if isGateway { + require.Equal(t, 1, len(bi.Sources)) + } else { + require.Equal(t, 0, len(bi.Sources)) + } } func testBuildInfoDeps(t *testing.T, sb integration.Sandbox) { diff --git a/frontend/dockerfile/dockerfile_provenance_test.go b/frontend/dockerfile/dockerfile_provenance_test.go new file mode 100644 index 000000000000..6729f7c1fcc6 --- /dev/null +++ b/frontend/dockerfile/dockerfile_provenance_test.go 
@@ -0,0 +1,852 @@ +package dockerfile + +import ( + "context" + "encoding/json" + "net/http" + "net/http/httptest" + "net/url" + "os/exec" + "path/filepath" + "strings" + "testing" + "time" + + "github.com/containerd/containerd/platforms" + "github.com/containerd/continuity/fs/fstest" + intoto "github.com/in-toto/in-toto-golang/in_toto" + "github.com/moby/buildkit/client" + "github.com/moby/buildkit/client/llb" + "github.com/moby/buildkit/exporter/containerimage/exptypes" + "github.com/moby/buildkit/frontend/dockerfile/builder" + gateway "github.com/moby/buildkit/frontend/gateway/client" + "github.com/moby/buildkit/solver/llbsolver/provenance" + "github.com/moby/buildkit/util/contentutil" + "github.com/moby/buildkit/util/testutil" + "github.com/moby/buildkit/util/testutil/integration" + ocispecs "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" + "github.com/stretchr/testify/require" +) + +func testProvenanceAttestation(t *testing.T, sb integration.Sandbox) { + ctx := sb.Context() + + c, err := client.New(ctx, sb.Address()) + require.NoError(t, err) + defer c.Close() + + registry, err := sb.NewRegistry() + if errors.Is(err, integration.ErrRequirements) { + t.Skip(err.Error()) + } + require.NoError(t, err) + + f := getFrontend(t, sb) + + dockerfile := []byte(` +FROM busybox:latest +RUN echo "ok" > /foo +`) + dir, err := integration.Tmpdir( + t, + fstest.CreateFile("Dockerfile", dockerfile, 0600), + ) + require.NoError(t, err) + + for _, mode := range []string{"min", "max"} { + t.Run(mode, func(t *testing.T) { + target := registry + "/buildkit/testwithprovenance:" + mode + provReq := "" + if mode == "max" { + provReq = "mode=max" + } + _, err = f.Solve(sb.Context(), c, client.SolveOpt{ + LocalDirs: map[string]string{ + builder.DefaultLocalNameDockerfile: dir, + builder.DefaultLocalNameContext: dir, + }, + FrontendAttrs: map[string]string{ + "attest:provenance": provReq, + "build-arg:FOO": "bar", + "label:lbl": "abc", + "vcs:source": 
"https://example.invalid/repo.git", + "vcs:revision": "123456", + "filename": "Dockerfile", + }, + Exports: []client.ExportEntry{ + { + Type: client.ExporterImage, + Attrs: map[string]string{ + "name": target, + "push": "true", + }, + }, + }, + }, nil) + require.NoError(t, err) + + desc, provider, err := contentutil.ProviderFromRef(target) + require.NoError(t, err) + imgs, err := testutil.ReadImages(sb.Context(), provider, desc) + require.NoError(t, err) + require.Equal(t, 2, len(imgs.Images)) + + img := imgs.Find(platforms.Format(platforms.Normalize(platforms.DefaultSpec()))) + require.NotNil(t, img) + require.Equal(t, []byte("ok\n"), img.Layers[1]["foo"].Data) + + att := imgs.Find("unknown/unknown") + require.NotNil(t, att) + require.Equal(t, att.Desc.Annotations["vnd.docker.reference.digest"], string(img.Desc.Digest)) + require.Equal(t, att.Desc.Annotations["vnd.docker.reference.type"], "attestation-manifest") + var attest intoto.Statement + require.NoError(t, json.Unmarshal(att.LayersRaw[0], &attest)) + require.Equal(t, "https://in-toto.io/Statement/v0.1", attest.Type) + require.Equal(t, "https://slsa.dev/provenance/v0.2", attest.PredicateType) // intentionally not const + + type stmtT struct { + Predicate provenance.ProvenancePredicate `json:"predicate"` + } + var stmt stmtT + require.NoError(t, json.Unmarshal(att.LayersRaw[0], &stmt)) + pred := stmt.Predicate + + require.Equal(t, "https://mobyproject.org/buildkit@v1", pred.BuildType) + require.Equal(t, "", pred.Builder.ID) + + require.Equal(t, "", pred.Invocation.ConfigSource.URI) + + _, isClient := f.(*clientFrontend) + _, isGateway := f.(*gatewayFrontend) + + args := pred.Invocation.Parameters.Args + if isClient { + require.Equal(t, "", pred.Invocation.Parameters.Frontend) + require.Equal(t, 0, len(args), "%v", args) + require.False(t, pred.Metadata.Completeness.Parameters) + require.Equal(t, "", pred.Invocation.ConfigSource.EntryPoint) + } else if isGateway { + require.Equal(t, "gateway.v0", 
pred.Invocation.Parameters.Frontend) + + if mode == "max" { + require.Equal(t, 3, len(args), "%v", args) + require.True(t, pred.Metadata.Completeness.Parameters) + + require.Equal(t, "bar", args["build-arg:FOO"]) + require.Equal(t, "abc", args["label:lbl"]) + require.Contains(t, args["source"], "buildkit_test/") + } else { + require.False(t, pred.Metadata.Completeness.Parameters) + require.Equal(t, 1, len(args), "%v", args) + require.Contains(t, args["source"], "buildkit_test/") + } + } else { + require.Equal(t, "dockerfile.v0", pred.Invocation.Parameters.Frontend) + + if mode == "max" { + require.Equal(t, 2, len(args)) + require.True(t, pred.Metadata.Completeness.Parameters) + + require.Equal(t, "bar", args["build-arg:FOO"]) + require.Equal(t, "abc", args["label:lbl"]) + } else { + require.False(t, pred.Metadata.Completeness.Parameters) + require.Equal(t, 0, len(args), "%v", args) + } + } + + expectedBase := "pkg:docker/busybox@latest?platform=" + url.PathEscape(platforms.Format(platforms.Normalize(platforms.DefaultSpec()))) + if isGateway { + require.Equal(t, 2, len(pred.Materials), "%+v", pred.Materials) + require.Contains(t, pred.Materials[0].URI, "docker/buildkit_test") + require.Equal(t, expectedBase, pred.Materials[1].URI) + require.NotEmpty(t, pred.Materials[1].Digest["sha256"]) + } else { + require.Equal(t, 1, len(pred.Materials), "%+v", pred.Materials) + require.Equal(t, expectedBase, pred.Materials[0].URI) + require.NotEmpty(t, pred.Materials[0].Digest["sha256"]) + } + + if !isClient { + require.Equal(t, "Dockerfile", pred.Invocation.ConfigSource.EntryPoint) + require.Equal(t, "https://example.invalid/repo.git", pred.Metadata.BuildKitMetadata.VCS["source"]) + require.Equal(t, "123456", pred.Metadata.BuildKitMetadata.VCS["revision"]) + } + + require.NotEmpty(t, pred.Metadata.BuildInvocationID) + + require.Equal(t, 2, len(pred.Invocation.Parameters.Locals), "%+v", pred.Invocation.Parameters.Locals) + require.Equal(t, "context", 
pred.Invocation.Parameters.Locals[0].Name) + require.Equal(t, "dockerfile", pred.Invocation.Parameters.Locals[1].Name) + + require.NotNil(t, pred.Metadata.BuildFinishedOn) + require.True(t, time.Since(*pred.Metadata.BuildFinishedOn) < 5*time.Minute) + require.NotNil(t, pred.Metadata.BuildStartedOn) + require.True(t, time.Since(*pred.Metadata.BuildStartedOn) < 5*time.Minute) + require.True(t, pred.Metadata.BuildStartedOn.Before(*pred.Metadata.BuildFinishedOn)) + + require.True(t, pred.Metadata.Completeness.Environment) + require.Equal(t, platforms.Format(platforms.Normalize(platforms.DefaultSpec())), pred.Invocation.Environment.Platform) + + require.False(t, pred.Metadata.Completeness.Materials) + require.False(t, pred.Metadata.Reproducible) + require.False(t, pred.Metadata.Completeness.Hermetic) + + if mode == "max" { + require.Equal(t, 2, len(pred.Metadata.BuildKitMetadata.Layers)) + require.NotNil(t, pred.Metadata.BuildKitMetadata.Source) + require.Equal(t, "Dockerfile", pred.Metadata.BuildKitMetadata.Source.Infos[0].Filename) + require.Equal(t, dockerfile, pred.Metadata.BuildKitMetadata.Source.Infos[0].Data) + require.NotNil(t, pred.BuildConfig) + + require.Equal(t, 3, len(pred.BuildConfig.Definition)) + } else { + require.Equal(t, 0, len(pred.Metadata.BuildKitMetadata.Layers)) + require.Nil(t, pred.Metadata.BuildKitMetadata.Source) + require.Nil(t, pred.BuildConfig) + } + }) + } +} + +func testGitProvenanceAttestation(t *testing.T, sb integration.Sandbox) { + ctx := sb.Context() + + c, err := client.New(ctx, sb.Address()) + require.NoError(t, err) + defer c.Close() + + registry, err := sb.NewRegistry() + if errors.Is(err, integration.ErrRequirements) { + t.Skip(err.Error()) + } + require.NoError(t, err) + + f := getFrontend(t, sb) + + dockerfile := []byte(` +FROM busybox:latest +RUN --network=none echo "git" > /foo +COPY myapp.Dockerfile / +`) + dir, err := integration.Tmpdir( + t, + fstest.CreateFile("myapp.Dockerfile", dockerfile, 0600), + ) + 
require.NoError(t, err) + + err = runShell(dir, + "git init", + "git config --local user.email test", + "git config --local user.name test", + "git add myapp.Dockerfile", + "git commit -m initial", + "git branch v1", + "git update-server-info", + ) + require.NoError(t, err) + + cmd := exec.Command("git", "rev-parse", "v1") + cmd.Dir = dir + expectedGitSHA, err := cmd.Output() + require.NoError(t, err) + + server := httptest.NewServer(http.FileServer(http.Dir(filepath.Join(dir)))) + defer server.Close() + + target := registry + "/buildkit/testwithprovenance:git" + + _, err = f.Solve(sb.Context(), c, client.SolveOpt{ + FrontendAttrs: map[string]string{ + "context": server.URL + "/.git#v1", + "attest:provenance": "", + "filename": "myapp.Dockerfile", + }, + Exports: []client.ExportEntry{ + { + Type: client.ExporterImage, + Attrs: map[string]string{ + "name": target, + "push": "true", + }, + }, + }, + }, nil) + require.NoError(t, err) + + desc, provider, err := contentutil.ProviderFromRef(target) + require.NoError(t, err) + imgs, err := testutil.ReadImages(sb.Context(), provider, desc) + require.NoError(t, err) + require.Equal(t, 2, len(imgs.Images)) + + img := imgs.Find(platforms.Format(platforms.Normalize(platforms.DefaultSpec()))) + require.NotNil(t, img) + require.Equal(t, []byte("git\n"), img.Layers[1]["foo"].Data) + + att := imgs.Find("unknown/unknown") + require.NotNil(t, att) + require.Equal(t, att.Desc.Annotations["vnd.docker.reference.digest"], string(img.Desc.Digest)) + require.Equal(t, att.Desc.Annotations["vnd.docker.reference.type"], "attestation-manifest") + var attest intoto.Statement + require.NoError(t, json.Unmarshal(att.LayersRaw[0], &attest)) + require.Equal(t, "https://in-toto.io/Statement/v0.1", attest.Type) + require.Equal(t, "https://slsa.dev/provenance/v0.2", attest.PredicateType) // intentionally not const + + type stmtT struct { + Predicate provenance.ProvenancePredicate `json:"predicate"` + } + var stmt stmtT + require.NoError(t, 
json.Unmarshal(att.LayersRaw[0], &stmt)) + pred := stmt.Predicate + + _, isClient := f.(*clientFrontend) + _, isGateway := f.(*gatewayFrontend) + + if isClient { + require.Empty(t, pred.Invocation.Parameters.Frontend) + require.Equal(t, "", pred.Invocation.ConfigSource.URI) + require.Equal(t, "", pred.Invocation.ConfigSource.EntryPoint) + } else { + require.NotEmpty(t, pred.Invocation.Parameters.Frontend) + require.Equal(t, server.URL+"/.git#v1", pred.Invocation.ConfigSource.URI) + require.Equal(t, "myapp.Dockerfile", pred.Invocation.ConfigSource.EntryPoint) + } + + expBase := "pkg:docker/busybox@latest?platform=" + url.PathEscape(platforms.Format(platforms.Normalize(platforms.DefaultSpec()))) + if isGateway { + require.Equal(t, 3, len(pred.Materials), "%+v", pred.Materials) + + require.Contains(t, pred.Materials[0].URI, "pkg:docker/buildkit_test/") + require.NotEmpty(t, pred.Materials[0].Digest) + + require.Equal(t, expBase, pred.Materials[1].URI) + require.NotEmpty(t, pred.Materials[1].Digest["sha256"]) + + require.Equal(t, server.URL+"/.git#v1", pred.Materials[2].URI) + require.Equal(t, strings.TrimSpace(string(expectedGitSHA)), pred.Materials[2].Digest["sha1"]) + } else { + require.Equal(t, 2, len(pred.Materials), "%+v", pred.Materials) + + require.Equal(t, expBase, pred.Materials[0].URI) + require.NotEmpty(t, pred.Materials[0].Digest["sha256"]) + + require.Equal(t, server.URL+"/.git#v1", pred.Materials[1].URI) + require.Equal(t, strings.TrimSpace(string(expectedGitSHA)), pred.Materials[1].Digest["sha1"]) + } + + require.Equal(t, 0, len(pred.Invocation.Parameters.Locals)) + + require.True(t, pred.Metadata.Completeness.Materials) + require.True(t, pred.Metadata.Completeness.Hermetic) + require.True(t, pred.Metadata.Completeness.Environment) + + if isClient { + require.False(t, pred.Metadata.Completeness.Parameters) + } else { + require.True(t, pred.Metadata.Completeness.Parameters) + } + require.False(t, pred.Metadata.Reproducible) + + require.Equal(t, 0, 
len(pred.Metadata.BuildKitMetadata.VCS), "%+v", pred.Metadata.BuildKitMetadata.VCS) +} + +func testMultiPlatformProvenance(t *testing.T, sb integration.Sandbox) { + ctx := sb.Context() + + c, err := client.New(ctx, sb.Address()) + require.NoError(t, err) + defer c.Close() + + registry, err := sb.NewRegistry() + if errors.Is(err, integration.ErrRequirements) { + t.Skip(err.Error()) + } + require.NoError(t, err) + + f := getFrontend(t, sb) + + dockerfile := []byte(` +FROM busybox:latest +ARG TARGETARCH +RUN echo "ok-$TARGETARCH" > /foo +`) + dir, err := integration.Tmpdir( + t, + fstest.CreateFile("Dockerfile", dockerfile, 0600), + ) + require.NoError(t, err) + + target := registry + "/buildkit/testmultiprovenance:latest" + + _, err = f.Solve(sb.Context(), c, client.SolveOpt{ + LocalDirs: map[string]string{ + builder.DefaultLocalNameDockerfile: dir, + builder.DefaultLocalNameContext: dir, + }, + FrontendAttrs: map[string]string{ + "attest:provenance": "mode=max", + "build-arg:FOO": "bar", + "label:lbl": "abc", + "platform": "linux/amd64,linux/arm64", + }, + Exports: []client.ExportEntry{ + { + Type: client.ExporterImage, + Attrs: map[string]string{ + "name": target, + "push": "true", + }, + }, + }, + }, nil) + require.NoError(t, err) + + desc, provider, err := contentutil.ProviderFromRef(target) + require.NoError(t, err) + imgs, err := testutil.ReadImages(sb.Context(), provider, desc) + require.NoError(t, err) + require.Equal(t, 4, len(imgs.Images)) + + _, isClient := f.(*clientFrontend) + _, isGateway := f.(*gatewayFrontend) + + for _, p := range []string{"linux/amd64", "linux/arm64"} { + img := imgs.Find(p) + require.NotNil(t, img) + if p == "linux/amd64" { + require.Equal(t, []byte("ok-amd64\n"), img.Layers[1]["foo"].Data) + } else { + require.Equal(t, []byte("ok-arm64\n"), img.Layers[1]["foo"].Data) + } + + att := imgs.FindAttestation(p) + require.NotNil(t, att) + require.Equal(t, att.Desc.Annotations["vnd.docker.reference.type"], "attestation-manifest") + var 
attest intoto.Statement + require.NoError(t, json.Unmarshal(att.LayersRaw[0], &attest)) + require.Equal(t, "https://in-toto.io/Statement/v0.1", attest.Type) + require.Equal(t, "https://slsa.dev/provenance/v0.2", attest.PredicateType) // intentionally not const + + type stmtT struct { + Predicate provenance.ProvenancePredicate `json:"predicate"` + } + var stmt stmtT + require.NoError(t, json.Unmarshal(att.LayersRaw[0], &stmt)) + pred := stmt.Predicate + + require.Equal(t, "https://mobyproject.org/buildkit@v1", pred.BuildType) + require.Equal(t, "", pred.Builder.ID) + require.Equal(t, "", pred.Invocation.ConfigSource.URI) + + if isGateway { + require.Equal(t, 2, len(pred.Materials), "%+v", pred.Materials) + require.Contains(t, pred.Materials[0].URI, "buildkit_test") + require.Contains(t, pred.Materials[1].URI, "pkg:docker/busybox@latest") + require.Contains(t, pred.Materials[1].URI, url.PathEscape(p)) + } else { + require.Equal(t, 1, len(pred.Materials), "%+v", pred.Materials) + require.Contains(t, pred.Materials[0].URI, "pkg:docker/busybox@latest") + require.Contains(t, pred.Materials[0].URI, url.PathEscape(p)) + } + + args := pred.Invocation.Parameters.Args + if isClient { + require.Equal(t, 0, len(args), "%+v", args) + } else if isGateway { + require.Equal(t, 3, len(args), "%+v", args) + require.Equal(t, "bar", args["build-arg:FOO"]) + require.Equal(t, "abc", args["label:lbl"]) + require.Contains(t, args["source"], "buildkit_test/") + } else { + require.Equal(t, 2, len(args), "%+v", args) + require.Equal(t, "bar", args["build-arg:FOO"]) + require.Equal(t, "abc", args["label:lbl"]) + } + } +} + +func testClientFrontendProvenance(t *testing.T, sb integration.Sandbox) { + // Building with client frontend does not capture frontend provenance + // because frontend runs in client, not in BuildKit. + // This test builds Dockerfile inside a client frontend ensuring that + // in that case frontend provenance is captured. 
+ ctx := sb.Context() + + c, err := client.New(ctx, sb.Address()) + require.NoError(t, err) + defer c.Close() + + registry, err := sb.NewRegistry() + if errors.Is(err, integration.ErrRequirements) { + t.Skip(err.Error()) + } + require.NoError(t, err) + + target := registry + "/buildkit/clientprovenance:latest" + + f := getFrontend(t, sb) + + _, isClient := f.(*clientFrontend) + if !isClient { + t.Skip("not a client frontend") + } + + dockerfile := []byte(` + FROM alpine as x86target + RUN echo "alpine" > /foo + + FROM busybox:latest AS armtarget + RUN --network=none echo "bbox" > /foo + `) + dir, err := integration.Tmpdir( + t, + fstest.CreateFile("Dockerfile", dockerfile, 0600), + ) + require.NoError(t, err) + + frontend := func(ctx context.Context, c gateway.Client) (*gateway.Result, error) { + st := llb.HTTP("https://raw.githubusercontent.com/moby/moby/v20.10.21/README.md") + def, err := st.Marshal(ctx) + if err != nil { + return nil, err + } + // This does not show up in provenance + res0, err := c.Solve(ctx, gateway.SolveRequest{ + Definition: def.ToPB(), + }) + if err != nil { + return nil, err + } + dt, err := res0.Ref.ReadFile(ctx, gateway.ReadRequest{ + Filename: "README.md", + }) + if err != nil { + return nil, err + } + + res1, err := c.Solve(ctx, gateway.SolveRequest{ + Frontend: "dockerfile.v0", + FrontendOpt: map[string]string{ + "build-arg:FOO": string(dt[:3]), + "target": "armtarget", + }, + }) + if err != nil { + return nil, err + } + + res2, err := c.Solve(ctx, gateway.SolveRequest{ + Frontend: "dockerfile.v0", + FrontendOpt: map[string]string{ + "build-arg:FOO": string(dt[4:8]), + "target": "x86target", + }, + }) + if err != nil { + return nil, err + } + + res := gateway.NewResult() + res.AddRef("linux/arm64", res1.Ref) + res.AddRef("linux/amd64", res2.Ref) + + pl, err := json.Marshal(exptypes.Platforms{ + Platforms: []exptypes.Platform{ + { + ID: "linux/arm64", + Platform: ocispecs.Platform{OS: "linux", Architecture: "arm64"}, + }, + { + ID: 
"linux/amd64", + Platform: ocispecs.Platform{OS: "linux", Architecture: "amd64"}, + }, + }, + }) + if err != nil { + return nil, err + } + res.AddMeta(exptypes.ExporterPlatformsKey, pl) + + return res, nil + } + + _, err = c.Build(sb.Context(), client.SolveOpt{ + FrontendAttrs: map[string]string{ + "attest:provenance": "mode=full", + }, + Exports: []client.ExportEntry{ + { + Type: client.ExporterImage, + Attrs: map[string]string{ + "name": target, + "push": "true", + }, + }, + }, + LocalDirs: map[string]string{ + builder.DefaultLocalNameDockerfile: dir, + builder.DefaultLocalNameContext: dir, + }, + }, "", frontend, nil) + require.NoError(t, err) + + desc, provider, err := contentutil.ProviderFromRef(target) + require.NoError(t, err) + imgs, err := testutil.ReadImages(sb.Context(), provider, desc) + require.NoError(t, err) + require.Equal(t, 4, len(imgs.Images)) + + img := imgs.Find("linux/arm64") + require.NotNil(t, img) + require.Equal(t, []byte("bbox\n"), img.Layers[1]["foo"].Data) + + att := imgs.FindAttestation("linux/arm64") + require.NotNil(t, att) + require.Equal(t, att.Desc.Annotations["vnd.docker.reference.type"], "attestation-manifest") + var attest intoto.Statement + require.NoError(t, json.Unmarshal(att.LayersRaw[0], &attest)) + require.Equal(t, "https://in-toto.io/Statement/v0.1", attest.Type) + require.Equal(t, "https://slsa.dev/provenance/v0.2", attest.PredicateType) // intentionally not const + + type stmtT struct { + Predicate provenance.ProvenancePredicate `json:"predicate"` + } + var stmt stmtT + require.NoError(t, json.Unmarshal(att.LayersRaw[0], &stmt)) + pred := stmt.Predicate + + require.Equal(t, "https://mobyproject.org/buildkit@v1", pred.BuildType) + require.Equal(t, "", pred.Builder.ID) + require.Equal(t, "", pred.Invocation.ConfigSource.URI) + + args := pred.Invocation.Parameters.Args + require.Equal(t, 2, len(args), "%+v", args) + require.Equal(t, "The", args["build-arg:FOO"]) + require.Equal(t, "armtarget", args["target"]) + + 
require.Equal(t, 2, len(pred.Invocation.Parameters.Locals)) + require.Equal(t, 1, len(pred.Materials)) + require.Contains(t, pred.Materials[0].URI, "docker/busybox") + + // amd64 + img = imgs.Find("linux/amd64") + require.NotNil(t, img) + require.Equal(t, []byte("alpine\n"), img.Layers[1]["foo"].Data) + + att = imgs.FindAttestation("linux/amd64") + require.NotNil(t, att) + require.Equal(t, att.Desc.Annotations["vnd.docker.reference.type"], "attestation-manifest") + attest = intoto.Statement{} + require.NoError(t, json.Unmarshal(att.LayersRaw[0], &attest)) + require.Equal(t, "https://in-toto.io/Statement/v0.1", attest.Type) + require.Equal(t, "https://slsa.dev/provenance/v0.2", attest.PredicateType) // intentionally not const + + stmt = stmtT{} + require.NoError(t, json.Unmarshal(att.LayersRaw[0], &stmt)) + pred = stmt.Predicate + + require.Equal(t, "https://mobyproject.org/buildkit@v1", pred.BuildType) + require.Equal(t, "", pred.Builder.ID) + require.Equal(t, "", pred.Invocation.ConfigSource.URI) + + args = pred.Invocation.Parameters.Args + require.Equal(t, 2, len(args), "%+v", args) + require.Equal(t, "Moby", args["build-arg:FOO"]) + require.Equal(t, "x86target", args["target"]) + + require.Equal(t, 2, len(pred.Invocation.Parameters.Locals)) + require.Equal(t, 1, len(pred.Materials)) + require.Contains(t, pred.Materials[0].URI, "docker/alpine") +} + +func testClientLLBProvenance(t *testing.T, sb integration.Sandbox) { + ctx := sb.Context() + + c, err := client.New(ctx, sb.Address()) + require.NoError(t, err) + defer c.Close() + + registry, err := sb.NewRegistry() + if errors.Is(err, integration.ErrRequirements) { + t.Skip(err.Error()) + } + require.NoError(t, err) + + target := registry + "/buildkit/clientprovenance:llb" + + f := getFrontend(t, sb) + + _, isClient := f.(*clientFrontend) + if !isClient { + t.Skip("not a client frontend") + } + + frontend := func(ctx context.Context, c gateway.Client) (*gateway.Result, error) { + st := 
llb.HTTP("https://raw.githubusercontent.com/moby/moby/v20.10.21/README.md") + def, err := st.Marshal(ctx) + if err != nil { + return nil, err + } + // this also shows up in the provenance + res0, err := c.Solve(ctx, gateway.SolveRequest{ + Definition: def.ToPB(), + }) + if err != nil { + return nil, err + } + dt, err := res0.Ref.ReadFile(ctx, gateway.ReadRequest{ + Filename: "README.md", + }) + if err != nil { + return nil, err + } + + st = llb.Image("alpine").File(llb.Mkfile("/foo", 0600, dt)) + def, err = st.Marshal(ctx) + if err != nil { + return nil, err + } + res1, err := c.Solve(ctx, gateway.SolveRequest{ + Definition: def.ToPB(), + }) + if err != nil { + return nil, err + } + return res1, nil + } + + _, err = c.Build(sb.Context(), client.SolveOpt{ + FrontendAttrs: map[string]string{ + "attest:provenance": "mode=full", + }, + Exports: []client.ExportEntry{ + { + Type: client.ExporterImage, + Attrs: map[string]string{ + "name": target, + "push": "true", + }, + }, + }, + LocalDirs: map[string]string{}, + }, "", frontend, nil) + require.NoError(t, err) + + desc, provider, err := contentutil.ProviderFromRef(target) + require.NoError(t, err) + imgs, err := testutil.ReadImages(sb.Context(), provider, desc) + require.NoError(t, err) + require.Equal(t, 2, len(imgs.Images)) + + nativePlatform := platforms.Format(platforms.Normalize(platforms.DefaultSpec())) + + img := imgs.Find(nativePlatform) + require.NotNil(t, img) + require.Contains(t, string(img.Layers[1]["foo"].Data), "The Moby Project") + + att := imgs.FindAttestation(nativePlatform) + require.NotNil(t, att) + require.Equal(t, att.Desc.Annotations["vnd.docker.reference.type"], "attestation-manifest") + var attest intoto.Statement + require.NoError(t, json.Unmarshal(att.LayersRaw[0], &attest)) + require.Equal(t, "https://in-toto.io/Statement/v0.1", attest.Type) + require.Equal(t, "https://slsa.dev/provenance/v0.2", attest.PredicateType) // intentionally not const + + type stmtT struct { + Predicate 
provenance.ProvenancePredicate `json:"predicate"` + } + var stmt stmtT + require.NoError(t, json.Unmarshal(att.LayersRaw[0], &stmt)) + pred := stmt.Predicate + + require.Equal(t, "https://mobyproject.org/buildkit@v1", pred.BuildType) + require.Equal(t, "", pred.Builder.ID) + require.Equal(t, "", pred.Invocation.ConfigSource.URI) + + args := pred.Invocation.Parameters.Args + require.Equal(t, 0, len(args), "%+v", args) + require.Equal(t, 0, len(pred.Invocation.Parameters.Locals)) + + require.Equal(t, 2, len(pred.Materials), "%+v", pred.Materials) + require.Contains(t, pred.Materials[0].URI, "docker/alpine") + require.Contains(t, pred.Materials[1].URI, "README.md") +} + +func testSecretSSHProvenance(t *testing.T, sb integration.Sandbox) { + ctx := sb.Context() + + c, err := client.New(ctx, sb.Address()) + require.NoError(t, err) + defer c.Close() + + registry, err := sb.NewRegistry() + if errors.Is(err, integration.ErrRequirements) { + t.Skip(err.Error()) + } + require.NoError(t, err) + + f := getFrontend(t, sb) + + dockerfile := []byte(` +FROM busybox:latest +RUN --mount=type=secret,id=mysecret --mount=type=secret,id=othersecret --mount=type=ssh echo "ok" > /foo +`) + dir, err := integration.Tmpdir( + t, + fstest.CreateFile("Dockerfile", dockerfile, 0600), + ) + require.NoError(t, err) + + target := registry + "/buildkit/testsecretprovenance:latest" + _, err = f.Solve(sb.Context(), c, client.SolveOpt{ + LocalDirs: map[string]string{ + builder.DefaultLocalNameDockerfile: dir, + builder.DefaultLocalNameContext: dir, + }, + FrontendAttrs: map[string]string{ + "attest:provenance": "mode=max", + }, + Exports: []client.ExportEntry{ + { + Type: client.ExporterImage, + Attrs: map[string]string{ + "name": target, + "push": "true", + }, + }, + }, + }, nil) + require.NoError(t, err) + + desc, provider, err := contentutil.ProviderFromRef(target) + require.NoError(t, err) + imgs, err := testutil.ReadImages(sb.Context(), provider, desc) + require.NoError(t, err) + require.Equal(t, 
2, len(imgs.Images)) + + expPlatform := platforms.Format(platforms.Normalize(platforms.DefaultSpec())) + + img := imgs.Find(expPlatform) + require.NotNil(t, img) + require.Equal(t, []byte("ok\n"), img.Layers[1]["foo"].Data) + + att := imgs.FindAttestation(expPlatform) + type stmtT struct { + Predicate provenance.ProvenancePredicate `json:"predicate"` + } + var stmt stmtT + require.NoError(t, json.Unmarshal(att.LayersRaw[0], &stmt)) + pred := stmt.Predicate + + require.Equal(t, 2, len(pred.Invocation.Parameters.Secrets), "%+v", pred.Invocation.Parameters.Secrets) + require.Equal(t, "mysecret", pred.Invocation.Parameters.Secrets[0].ID) + require.True(t, pred.Invocation.Parameters.Secrets[0].Optional) + require.Equal(t, "othersecret", pred.Invocation.Parameters.Secrets[1].ID) + require.True(t, pred.Invocation.Parameters.Secrets[1].Optional) + + require.Equal(t, 1, len(pred.Invocation.Parameters.SSH), "%+v", pred.Invocation.Parameters.SSH) + require.Equal(t, "default", pred.Invocation.Parameters.SSH[0].ID) + require.True(t, pred.Invocation.Parameters.SSH[0].Optional) +} diff --git a/frontend/dockerfile/dockerfile_test.go b/frontend/dockerfile/dockerfile_test.go index f83513e353be..4fd465ddc55a 100644 --- a/frontend/dockerfile/dockerfile_test.go +++ b/frontend/dockerfile/dockerfile_test.go @@ -28,7 +28,6 @@ import ( "github.com/containerd/containerd/snapshots" "github.com/containerd/continuity/fs/fstest" intoto "github.com/in-toto/in-toto-golang/in_toto" - slsa "github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v0.2" "github.com/moby/buildkit/client" "github.com/moby/buildkit/client/llb" "github.com/moby/buildkit/frontend/dockerfile/builder" @@ -40,7 +39,6 @@ import ( "github.com/moby/buildkit/solver/errdefs" "github.com/moby/buildkit/solver/pb" "github.com/moby/buildkit/util/contentutil" - "github.com/moby/buildkit/util/provenance" "github.com/moby/buildkit/util/testutil" "github.com/moby/buildkit/util/testutil/httpserver" 
"github.com/moby/buildkit/util/testutil/integration" @@ -151,6 +149,10 @@ var allTests = integration.TestFuncs( testSBOMScannerImage, testProvenanceAttestation, testGitProvenanceAttestation, + testMultiPlatformProvenance, + testClientFrontendProvenance, + testClientLLBProvenance, + testSecretSSHProvenance, ) // Tests that depend on the `security.*` entitlements @@ -6157,260 +6159,6 @@ EOF require.Equal(t, map[string]interface{}{"success": true}, attest.Predicate) } -func testProvenanceAttestation(t *testing.T, sb integration.Sandbox) { - ctx := sb.Context() - - c, err := client.New(ctx, sb.Address()) - require.NoError(t, err) - defer c.Close() - - registry, err := sb.NewRegistry() - if errors.Is(err, integration.ErrRequirements) { - t.Skip(err.Error()) - } - require.NoError(t, err) - - f := getFrontend(t, sb) - - dockerfile := []byte(` -FROM busybox:latest -RUN echo "ok" > /foo -`) - dir, err := integration.Tmpdir( - t, - fstest.CreateFile("Dockerfile", dockerfile, 0600), - ) - require.NoError(t, err) - - for _, mode := range []string{"min", "max"} { - t.Run(mode, func(t *testing.T) { - target := registry + "/buildkit/testwithprovenance:" + mode - provReq := "" - if mode == "max" { - provReq = "mode=max" - } - _, err = f.Solve(sb.Context(), c, client.SolveOpt{ - LocalDirs: map[string]string{ - builder.DefaultLocalNameDockerfile: dir, - builder.DefaultLocalNameContext: dir, - }, - FrontendAttrs: map[string]string{ - "attest:provenance": provReq, - "build-arg:FOO": "bar", - "label:lbl": "abc", - "vcs:source": "https://example.invalid/repo.git", - "vcs:revision": "123456", - }, - Exports: []client.ExportEntry{ - { - Type: client.ExporterImage, - Attrs: map[string]string{ - "name": target, - "push": "true", - }, - }, - }, - }, nil) - require.NoError(t, err) - - desc, provider, err := contentutil.ProviderFromRef(target) - require.NoError(t, err) - imgs, err := testutil.ReadImages(sb.Context(), provider, desc) - require.NoError(t, err) - require.Equal(t, 2, 
len(imgs.Images)) - - img := imgs.Find(platforms.Format(platforms.Normalize(platforms.DefaultSpec()))) - require.NotNil(t, img) - require.Equal(t, []byte("ok\n"), img.Layers[1]["foo"].Data) - - att := imgs.Find("unknown/unknown") - require.NotNil(t, att) - require.Equal(t, att.Desc.Annotations["vnd.docker.reference.digest"], string(img.Desc.Digest)) - require.Equal(t, att.Desc.Annotations["vnd.docker.reference.type"], "attestation-manifest") - var attest intoto.Statement - require.NoError(t, json.Unmarshal(att.LayersRaw[0], &attest)) - require.Equal(t, "https://in-toto.io/Statement/v0.1", attest.Type) - require.Equal(t, "https://slsa.dev/provenance/v0.2", attest.PredicateType) // intentionally not const - - type stmtT struct { - Predicate provenance.ProvenancePredicate `json:"predicate"` - } - var stmt stmtT - require.NoError(t, json.Unmarshal(att.LayersRaw[0], &stmt)) - pred := stmt.Predicate - - require.Equal(t, "https://mobyproject.org/buildkit@v1", pred.BuildType) - require.Equal(t, "", pred.Builder.ID) - - require.Equal(t, slsa.ConfigSource{}, pred.Invocation.ConfigSource) - - switch f.(type) { - case *clientFrontend, *gatewayFrontend: - // TODO: buildinfo broken - default: - params, ok := pred.Invocation.Parameters.(map[string]interface{}) - require.True(t, ok, "%T", pred.Invocation.Parameters) - if mode == "max" { - require.Equal(t, 2, len(params)) - require.True(t, pred.Metadata.Completeness.Parameters) - - require.Equal(t, "bar", params["build-arg:FOO"]) - require.Equal(t, "abc", params["label:lbl"]) - } else { - require.False(t, pred.Metadata.Completeness.Parameters) - require.Equal(t, 0, len(params), "%v", params) - } - - require.Equal(t, "https://example.invalid/repo.git", pred.Metadata.VCS["source"]) - require.Equal(t, "123456", pred.Metadata.VCS["revision"]) - } - - require.Equal(t, 1, len(pred.Materials)) - require.Equal(t, "pkg:docker/busybox@latest", pred.Materials[0].URI) - require.NotEmpty(t, pred.Materials[0].Digest["sha256"]) - - 
require.NotEmpty(t, pred.Metadata.BuildInvocationID) - - require.NotNil(t, pred.Metadata.BuildFinishedOn) - require.True(t, time.Since(*pred.Metadata.BuildFinishedOn) < 5*time.Minute) - require.NotNil(t, pred.Metadata.BuildStartedOn) - require.True(t, time.Since(*pred.Metadata.BuildStartedOn) < 5*time.Minute) - require.True(t, pred.Metadata.BuildStartedOn.Before(*pred.Metadata.BuildFinishedOn)) - - require.True(t, pred.Metadata.Completeness.Environment) - require.True(t, pred.Metadata.Completeness.Materials) - require.False(t, pred.Metadata.Reproducible) - - if mode == "max" { - require.Equal(t, 2, len(pred.Layers)) - require.NotNil(t, pred.Source) - require.Equal(t, "Dockerfile", pred.Source.Infos[0].Filename) - require.Equal(t, dockerfile, pred.Source.Infos[0].Data) - require.NotNil(t, pred.BuildConfig) - - bc, ok := pred.BuildConfig.(map[string]interface{}) - require.True(t, ok, "wrong type %T", pred.BuildConfig) - - llb, ok := bc["llbDefinition"].([]interface{}) - require.True(t, ok, "wrong buildconfig %+v", bc) - - require.Equal(t, 3, len(llb)) - } else { - require.Equal(t, 0, len(pred.Layers)) - require.Nil(t, pred.Source) - require.Nil(t, pred.BuildConfig) - } - }) - } -} - -func testGitProvenanceAttestation(t *testing.T, sb integration.Sandbox) { - ctx := sb.Context() - - c, err := client.New(ctx, sb.Address()) - require.NoError(t, err) - defer c.Close() - - registry, err := sb.NewRegistry() - if errors.Is(err, integration.ErrRequirements) { - t.Skip(err.Error()) - } - require.NoError(t, err) - - f := getFrontend(t, sb) - - dockerfile := []byte(` -FROM busybox:latest -RUN echo "git" > /foo -COPY myapp.Dockerfile / -`) - dir, err := integration.Tmpdir( - t, - fstest.CreateFile("myapp.Dockerfile", dockerfile, 0600), - ) - require.NoError(t, err) - - err = runShell(dir, - "git init", - "git config --local user.email test", - "git config --local user.name test", - "git add myapp.Dockerfile", - "git commit -m initial", - "git branch v1", - "git 
update-server-info", - ) - require.NoError(t, err) - - cmd := exec.Command("git", "rev-parse", "v1") - cmd.Dir = dir - expectedGitSHA, err := cmd.Output() - require.NoError(t, err) - - server := httptest.NewServer(http.FileServer(http.Dir(filepath.Join(dir)))) - defer server.Close() - - target := registry + "/buildkit/testwithprovenance:git" - - _, err = f.Solve(sb.Context(), c, client.SolveOpt{ - FrontendAttrs: map[string]string{ - "context": server.URL + "/.git#v1", - "attest:provenance": "", - "filename": "myapp.Dockerfile", - }, - Exports: []client.ExportEntry{ - { - Type: client.ExporterImage, - Attrs: map[string]string{ - "name": target, - "push": "true", - }, - }, - }, - }, nil) - require.NoError(t, err) - - desc, provider, err := contentutil.ProviderFromRef(target) - require.NoError(t, err) - imgs, err := testutil.ReadImages(sb.Context(), provider, desc) - require.NoError(t, err) - require.Equal(t, 2, len(imgs.Images)) - - img := imgs.Find(platforms.Format(platforms.Normalize(platforms.DefaultSpec()))) - require.NotNil(t, img) - require.Equal(t, []byte("git\n"), img.Layers[1]["foo"].Data) - - att := imgs.Find("unknown/unknown") - require.NotNil(t, att) - require.Equal(t, att.Desc.Annotations["vnd.docker.reference.digest"], string(img.Desc.Digest)) - require.Equal(t, att.Desc.Annotations["vnd.docker.reference.type"], "attestation-manifest") - var attest intoto.Statement - require.NoError(t, json.Unmarshal(att.LayersRaw[0], &attest)) - require.Equal(t, "https://in-toto.io/Statement/v0.1", attest.Type) - require.Equal(t, "https://slsa.dev/provenance/v0.2", attest.PredicateType) // intentionally not const - - type stmtT struct { - Predicate provenance.ProvenancePredicate `json:"predicate"` - } - var stmt stmtT - require.NoError(t, json.Unmarshal(att.LayersRaw[0], &stmt)) - pred := stmt.Predicate - - switch f.(type) { - case *clientFrontend: - // TODO: buildinfo broken - default: - require.Equal(t, server.URL+"/.git#v1", pred.Invocation.ConfigSource.URI) - 
require.Equal(t, "myapp.Dockerfile", pred.Invocation.ConfigSource.EntryPoint) - } - - require.Equal(t, 2, len(pred.Materials)) - require.Equal(t, "pkg:docker/busybox@latest", pred.Materials[0].URI) - require.NotEmpty(t, pred.Materials[0].Digest["sha256"]) - - require.Equal(t, strings.Replace(server.URL+"/.git#v1", "http://", "https://", 1), pred.Materials[1].URI) // TODO: buildinfo broken - require.Equal(t, strings.TrimSpace(string(expectedGitSHA)), pred.Materials[1].Digest["sha1"]) -} - func runShell(dir string, cmds ...string) error { for _, args := range cmds { var cmd *exec.Cmd diff --git a/frontend/gateway/gateway.go b/frontend/gateway/gateway.go index 5c14fa2940ff..dfd7e0dd9bb6 100644 --- a/frontend/gateway/gateway.go +++ b/frontend/gateway/gateway.go @@ -39,7 +39,6 @@ import ( opspb "github.com/moby/buildkit/solver/pb" "github.com/moby/buildkit/util/apicaps" "github.com/moby/buildkit/util/bklog" - "github.com/moby/buildkit/util/buildinfo" "github.com/moby/buildkit/util/grpcerrors" "github.com/moby/buildkit/util/stack" "github.com/moby/buildkit/util/tracing" @@ -650,16 +649,6 @@ func (lbf *llbBridgeForwarder) Solve(ctx context.Context, req *pb.SolveRequest) if ref == nil { id = "" } else { - dtbi, err := buildinfo.Encode(ctx, pbRes.Metadata, fmt.Sprintf("%s/%s", exptypes.ExporterBuildInfo, k), ref.BuildSources()) - if err != nil { - return nil, err - } - if len(dtbi) > 0 { - if pbRes.Metadata == nil { - pbRes.Metadata = make(map[string][]byte) - } - pbRes.Metadata[fmt.Sprintf("%s/%s", exptypes.ExporterBuildInfo, k)] = dtbi - } lbf.refs[id] = ref } ids[k] = id @@ -683,16 +672,6 @@ func (lbf *llbBridgeForwarder) Solve(ctx context.Context, req *pb.SolveRequest) if ref == nil { id = "" } else { - dtbi, err := buildinfo.Encode(ctx, pbRes.Metadata, exptypes.ExporterBuildInfo, ref.BuildSources()) - if err != nil { - return nil, err - } - if len(dtbi) > 0 { - if pbRes.Metadata == nil { - pbRes.Metadata = make(map[string][]byte) - } - 
pbRes.Metadata[exptypes.ExporterBuildInfo] = dtbi - } def = ref.Definition() lbf.refs[id] = ref } diff --git a/go.mod b/go.mod index b2483f9cb780..74bad2a99e9b 100644 --- a/go.mod +++ b/go.mod @@ -53,7 +53,7 @@ require ( github.com/opencontainers/runc v1.1.3 github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417 github.com/opencontainers/selinux v1.10.2 - github.com/package-url/packageurl-go v0.1.0 + github.com/package-url/packageurl-go v0.1.1-0.20220428063043-89078438f170 github.com/pelletier/go-toml v1.9.4 github.com/pkg/errors v0.9.1 github.com/pkg/profile v1.5.0 diff --git a/go.sum b/go.sum index 7d9488728fcb..5bbf08454b33 100644 --- a/go.sum +++ b/go.sum @@ -1161,8 +1161,8 @@ github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYr github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8= github.com/openzipkin/zipkin-go v0.1.3/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8= github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= -github.com/package-url/packageurl-go v0.1.0 h1:efWBc98O/dBZRg1pw2xiDzovnlMjCa9NPnfaiBduh8I= -github.com/package-url/packageurl-go v0.1.0/go.mod h1:C/ApiuWpmbpni4DIOECf6WCjFUZV7O1Fx7VAzrZHgBw= +github.com/package-url/packageurl-go v0.1.1-0.20220428063043-89078438f170 h1:DiLBVp4DAcZlBVBEtJpNWZpZVq0AEeCY7Hqk8URVs4o= +github.com/package-url/packageurl-go v0.1.1-0.20220428063043-89078438f170/go.mod h1:uQd4a7Rh3ZsVg5j0lNyAfyxIeGde9yrlhjF78GzeW0c= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= diff --git a/solver/jobs.go b/solver/jobs.go index 4eb89d2af72c..070b4020b775 100644 --- a/solver/jobs.go +++ b/solver/jobs.go @@ -23,7 +23,7 @@ import ( type ResolveOpFunc 
func(Vertex, Builder) (Op, error) type Builder interface { - Build(ctx context.Context, e Edge) (CachedResult, BuildSources, error) + Build(ctx context.Context, e Edge) (CachedResultWithProvenance, error) InContext(ctx context.Context, f func(ctx context.Context, g session.Group) error) error EachValue(ctx context.Context, key string, fn func(interface{}) error) error } @@ -198,16 +198,16 @@ type subBuilder struct { exporters []ExportableCacheKey } -func (sb *subBuilder) Build(ctx context.Context, e Edge) (CachedResult, BuildSources, error) { +func (sb *subBuilder) Build(ctx context.Context, e Edge) (CachedResultWithProvenance, error) { // TODO(@crazy-max): Handle BuildInfo from subbuild res, err := sb.solver.subBuild(ctx, e, sb.vtx) if err != nil { - return nil, nil, err + return nil, err } sb.mu.Lock() sb.exporters = append(sb.exporters, res.CacheKeys()[0]) // all keys already have full export chain sb.mu.Unlock() - return res, nil, nil + return &withProvenance{CachedResult: res}, nil } func (sb *subBuilder) InContext(ctx context.Context, f func(context.Context, session.Group) error) error { @@ -499,43 +499,62 @@ func (jl *Solver) deleteIfUnreferenced(k digest.Digest, st *state) { } } -func (j *Job) Build(ctx context.Context, e Edge) (CachedResult, BuildSources, error) { +func (j *Job) Build(ctx context.Context, e Edge) (CachedResultWithProvenance, error) { if span := trace.SpanFromContext(ctx); span.SpanContext().IsValid() { j.span = span } v, err := j.list.load(e.Vertex, nil, j) if err != nil { - return nil, nil, err + return nil, err } e.Vertex = v res, err := j.list.s.build(ctx, e) if err != nil { - return nil, nil, err + return nil, err } j.list.mu.Lock() defer j.list.mu.Unlock() - return res, j.walkBuildSources(ctx, e, make(BuildSources)), nil + return &withProvenance{CachedResult: res, j: j, e: e}, nil } -func (j *Job) walkBuildSources(ctx context.Context, e Edge, bsrc BuildSources) BuildSources { - for _, inp := range e.Vertex.Inputs() { - if st, ok := 
j.list.actives[inp.Vertex.Digest()]; ok { - st.mu.Lock() - for _, cacheRes := range st.op.cacheRes { - for key, val := range cacheRes.BuildSources { - if _, ok := bsrc[key]; !ok { - bsrc[key] = val - } - } +type withProvenance struct { + CachedResult + j *Job + e Edge +} + +func (wp *withProvenance) WalkProvenance(ctx context.Context, f func(ProvenanceProvider) error) error { + if wp.j == nil { + return nil + } + m := map[digest.Digest]struct{}{} + return wp.j.walkProvenance(ctx, wp.e, f, m) +} + +func (j *Job) walkProvenance(ctx context.Context, e Edge, f func(ProvenanceProvider) error, visited map[digest.Digest]struct{}) error { + if _, ok := visited[e.Vertex.Digest()]; ok { + return nil + } + visited[e.Vertex.Digest()] = struct{}{} + if st, ok := j.list.actives[e.Vertex.Digest()]; ok { + st.mu.Lock() + if wp, ok := st.op.op.(ProvenanceProvider); ok { + if err := f(wp); err != nil { + st.mu.Unlock() + return err } - st.mu.Unlock() - bsrc = j.walkBuildSources(ctx, inp, bsrc) } + st.mu.Unlock() } - return bsrc + for _, inp := range e.Vertex.Inputs() { + if err := j.walkProvenance(ctx, inp, f, visited); err != nil { + return err + } + } + return nil } func (j *Job) Discard() error { diff --git a/solver/llbsolver/bridge.go b/solver/llbsolver/bridge.go index b0ec157b46ea..a7c4ab4efa4a 100644 --- a/solver/llbsolver/bridge.go +++ b/solver/llbsolver/bridge.go @@ -11,7 +11,6 @@ import ( "github.com/moby/buildkit/cache/remotecache" "github.com/moby/buildkit/client" "github.com/moby/buildkit/client/llb" - "github.com/moby/buildkit/exporter/containerimage/exptypes" "github.com/moby/buildkit/frontend" gw "github.com/moby/buildkit/frontend/gateway/client" "github.com/moby/buildkit/identity" @@ -19,9 +18,9 @@ import ( "github.com/moby/buildkit/solver" "github.com/moby/buildkit/solver/errdefs" llberrdefs "github.com/moby/buildkit/solver/llbsolver/errdefs" + "github.com/moby/buildkit/solver/llbsolver/provenance" "github.com/moby/buildkit/solver/pb" 
"github.com/moby/buildkit/util/bklog" - "github.com/moby/buildkit/util/buildinfo" "github.com/moby/buildkit/util/flightcontrol" "github.com/moby/buildkit/util/progress" "github.com/moby/buildkit/worker" @@ -63,20 +62,20 @@ func (b *llbBridge) Warn(ctx context.Context, dgst digest.Digest, msg string, op }) } -func (b *llbBridge) loadResult(ctx context.Context, def *pb.Definition, cacheImports []gw.CacheOptionsEntry) (solver.CachedResult, solver.BuildSources, error) { +func (b *llbBridge) loadResult(ctx context.Context, def *pb.Definition, cacheImports []gw.CacheOptionsEntry) (solver.CachedResultWithProvenance, error) { w, err := b.resolveWorker() if err != nil { - return nil, nil, err + return nil, err } ent, err := loadEntitlements(b.builder) if err != nil { - return nil, nil, err + return nil, err } var cms []solver.CacheManager for _, im := range cacheImports { cmID, err := cmKey(im) if err != nil { - return nil, nil, err + return nil, err } b.cmsMu.Lock() var cm solver.CacheManager @@ -113,7 +112,7 @@ func (b *llbBridge) loadResult(ctx context.Context, def *pb.Definition, cacheImp edge, err := Load(def, dpc.Load, ValidateEntitlements(ent), WithCacheSources(cms), NormalizeRuntimePlatforms(), WithValidateCaps()) if err != nil { - return nil, nil, errors.Wrap(err, "failed to load LLB") + return nil, errors.Wrap(err, "failed to load LLB") } if len(dpc.ids) > 0 { @@ -124,88 +123,44 @@ func (b *llbBridge) loadResult(ctx context.Context, def *pb.Definition, cacheImp if err := b.eachWorker(func(w worker.Worker) error { return w.PruneCacheMounts(ctx, ids) }); err != nil { - return nil, nil, err - } - } - - res, bi, err := b.builder.Build(ctx, edge) - if err != nil { - return nil, nil, err - } - return res, bi, nil -} - -func (b *llbBridge) Solve(ctx context.Context, req frontend.SolveRequest, sid string) (res *frontend.Result, err error) { - if req.Definition != nil && req.Definition.Def != nil && req.Frontend != "" { - return nil, errors.New("cannot solve with both 
Definition and Frontend specified") - } - - if req.Definition != nil && req.Definition.Def != nil { - res = &frontend.Result{Ref: newResultProxy(b, req)} - } else if req.Frontend != "" { - f, ok := b.frontends[req.Frontend] - if !ok { - return nil, errors.Errorf("invalid frontend: %s", req.Frontend) - } - res, err = f.Solve(ctx, b, req.FrontendOpt, req.FrontendInputs, sid, b.sm) - if err != nil { return nil, err } - } else { - return &frontend.Result{}, nil - } - if req.Evaluate { - err = res.EachRef(func(ref solver.ResultProxy) error { - _, err := res.Ref.Result(ctx) - return err - }) } - if len(res.Refs) > 0 { - for p := range res.Refs { - dtbi, err := buildinfo.GetMetadata(res.Metadata, fmt.Sprintf("%s/%s", exptypes.ExporterBuildInfo, p), req.Frontend, req.FrontendOpt) - if err != nil { - return nil, err - } - if len(dtbi) > 0 { - res.AddMeta(fmt.Sprintf("%s/%s", exptypes.ExporterBuildInfo, p), dtbi) - } - } - } else { - dtbi, err := buildinfo.GetMetadata(res.Metadata, exptypes.ExporterBuildInfo, req.Frontend, req.FrontendOpt) - if err != nil { - return nil, err - } - if len(dtbi) > 0 { - res.AddMeta(exptypes.ExporterBuildInfo, dtbi) - } + res, err := b.builder.Build(ctx, edge) + if err != nil { + return nil, err } - - return + return res, nil } type resultProxy struct { - b *llbBridge + id string + b *provenanceBridge req frontend.SolveRequest g flightcontrol.Group mu sync.Mutex released bool v solver.CachedResult - bsrc solver.BuildSources err error errResults []solver.Result + provenance *provenance.Capture +} + +func newResultProxy(b *provenanceBridge, req frontend.SolveRequest) *resultProxy { + return &resultProxy{req: req, b: b, id: identity.NewID()} } -func newResultProxy(b *llbBridge, req frontend.SolveRequest) *resultProxy { - return &resultProxy{req: req, b: b} +func (rp *resultProxy) ID() string { + return rp.id } func (rp *resultProxy) Definition() *pb.Definition { return rp.req.Definition } -func (rp *resultProxy) BuildSources() solver.BuildSources 
{ - return rp.bsrc +func (rp *resultProxy) Provenance() interface{} { + return rp.provenance } func (rp *resultProxy) Release(ctx context.Context) (err error) { @@ -251,8 +206,8 @@ func (rp *resultProxy) wrapError(err error) error { return err } -func (rp *resultProxy) loadResult(ctx context.Context) (solver.CachedResult, solver.BuildSources, error) { - res, bsrc, err := rp.b.loadResult(ctx, rp.req.Definition, rp.req.CacheImports) +func (rp *resultProxy) loadResult(ctx context.Context) (solver.CachedResultWithProvenance, error) { + res, err := rp.b.loadResult(ctx, rp.req.Definition, rp.req.CacheImports) var ee *llberrdefs.ExecError if errors.As(err, &ee) { ee.EachRef(func(res solver.Result) error { @@ -262,7 +217,7 @@ func (rp *resultProxy) loadResult(ctx context.Context) (solver.CachedResult, sol // acquire ownership so ExecError finalizer doesn't attempt to release as well ee.OwnerBorrowed = true } - return res, bsrc, err + return res, err } func (rp *resultProxy) Result(ctx context.Context) (res solver.CachedResult, err error) { @@ -280,7 +235,7 @@ func (rp *resultProxy) Result(ctx context.Context) (res solver.CachedResult, err return rp.v, rp.err } rp.mu.Unlock() - v, bsrc, err := rp.loadResult(ctx) + v, err := rp.loadResult(ctx) if err != nil { select { case <-ctx.Done(): @@ -299,8 +254,16 @@ func (rp *resultProxy) Result(ctx context.Context) (res solver.CachedResult, err return nil, errors.Errorf("evaluating released result") } rp.v = v - rp.bsrc = bsrc rp.err = err + if err == nil { + capture, err := captureProvenance(ctx, v) + if err != nil && rp.err != nil { + rp.err = errors.Wrapf(rp.err, "failed to capture provenance: %v", err) + v.Release(context.TODO()) + rp.v = nil + } + rp.provenance = capture + } rp.mu.Unlock() return v, err }) diff --git a/solver/llbsolver/ops/build.go b/solver/llbsolver/ops/build.go index 4ada980dce88..fd47df3ae311 100644 --- a/solver/llbsolver/ops/build.go +++ b/solver/llbsolver/ops/build.go @@ -11,7 +11,7 @@ import ( 
"github.com/moby/buildkit/session" "github.com/moby/buildkit/snapshot" "github.com/moby/buildkit/solver" - "github.com/moby/buildkit/solver/llbsolver" + "github.com/moby/buildkit/solver/llbsolver/ops/opsutils" "github.com/moby/buildkit/solver/pb" "github.com/moby/buildkit/worker" digest "github.com/opencontainers/go-digest" @@ -20,24 +20,26 @@ import ( const buildCacheType = "buildkit.build.v0" -type buildOp struct { +type BuildOp struct { op *pb.BuildOp b frontend.FrontendLLBBridge v solver.Vertex } -func NewBuildOp(v solver.Vertex, op *pb.Op_Build, b frontend.FrontendLLBBridge, _ worker.Worker) (solver.Op, error) { - if err := llbsolver.ValidateOp(&pb.Op{Op: op}); err != nil { +var _ solver.Op = &BuildOp{} + +func NewBuildOp(v solver.Vertex, op *pb.Op_Build, b frontend.FrontendLLBBridge, _ worker.Worker) (*BuildOp, error) { + if err := opsutils.Validate(&pb.Op{Op: op}); err != nil { return nil, err } - return &buildOp{ + return &BuildOp{ op: op.Build, b: b, v: v, }, nil } -func (b *buildOp) CacheMap(ctx context.Context, g session.Group, index int) (*solver.CacheMap, bool, error) { +func (b *BuildOp) CacheMap(ctx context.Context, g session.Group, index int) (*solver.CacheMap, bool, error) { dt, err := json.Marshal(struct { Type string Exec *pb.BuildOp @@ -59,7 +61,7 @@ func (b *buildOp) CacheMap(ctx context.Context, g session.Group, index int) (*so }, true, nil } -func (b *buildOp) Exec(ctx context.Context, g session.Group, inputs []solver.Result) (outputs []solver.Result, retErr error) { +func (b *BuildOp) Exec(ctx context.Context, g session.Group, inputs []solver.Result) (outputs []solver.Result, retErr error) { if b.op.Builder != pb.LLBBuilder { return nil, errors.Errorf("only LLB builder is currently allowed") } @@ -145,7 +147,9 @@ func (b *buildOp) Exec(ctx context.Context, g session.Group, inputs []solver.Res return []solver.Result{r}, err } -func (b *buildOp) Acquire(ctx context.Context) (solver.ReleaseFunc, error) { +func (b *BuildOp) Acquire(ctx 
context.Context) (solver.ReleaseFunc, error) { // buildOp itself does not count towards parallelism budget. return func() {}, nil } + +func (b *BuildOp) IsProvenanceProvider() {} diff --git a/solver/llbsolver/ops/diff.go b/solver/llbsolver/ops/diff.go index 82234e7d834c..338a8748e8c6 100644 --- a/solver/llbsolver/ops/diff.go +++ b/solver/llbsolver/ops/diff.go @@ -10,7 +10,7 @@ import ( "github.com/moby/buildkit/cache" "github.com/moby/buildkit/session" "github.com/moby/buildkit/solver" - "github.com/moby/buildkit/solver/llbsolver" + "github.com/moby/buildkit/solver/llbsolver/ops/opsutils" "github.com/moby/buildkit/solver/pb" digest "github.com/opencontainers/go-digest" ) @@ -24,7 +24,7 @@ type diffOp struct { } func NewDiffOp(v solver.Vertex, op *pb.Op_Diff, w worker.Worker) (solver.Op, error) { - if err := llbsolver.ValidateOp(&pb.Op{Op: op}); err != nil { + if err := opsutils.Validate(&pb.Op{Op: op}); err != nil { return nil, err } return &diffOp{ diff --git a/solver/llbsolver/ops/exec.go b/solver/llbsolver/ops/exec.go index 51df9ff375c7..56df35e8a9d6 100644 --- a/solver/llbsolver/ops/exec.go +++ b/solver/llbsolver/ops/exec.go @@ -17,9 +17,9 @@ import ( "github.com/moby/buildkit/session" "github.com/moby/buildkit/session/secrets" "github.com/moby/buildkit/solver" - "github.com/moby/buildkit/solver/llbsolver" "github.com/moby/buildkit/solver/llbsolver/errdefs" "github.com/moby/buildkit/solver/llbsolver/mounts" + "github.com/moby/buildkit/solver/llbsolver/ops/opsutils" "github.com/moby/buildkit/solver/pb" "github.com/moby/buildkit/util/progress/logs" utilsystem "github.com/moby/buildkit/util/system" @@ -33,7 +33,7 @@ import ( const execCacheType = "buildkit.exec.v0" -type execOp struct { +type ExecOp struct { op *pb.ExecOp cm cache.Manager mm *mounts.MountManager @@ -45,12 +45,14 @@ type execOp struct { parallelism *semaphore.Weighted } -func NewExecOp(v solver.Vertex, op *pb.Op_Exec, platform *pb.Platform, cm cache.Manager, parallelism *semaphore.Weighted, sm 
*session.Manager, exec executor.Executor, w worker.Worker) (solver.Op, error) { - if err := llbsolver.ValidateOp(&pb.Op{Op: op}); err != nil { +var _ solver.Op = &ExecOp{} + +func NewExecOp(v solver.Vertex, op *pb.Op_Exec, platform *pb.Platform, cm cache.Manager, parallelism *semaphore.Weighted, sm *session.Manager, exec executor.Executor, w worker.Worker) (*ExecOp, error) { + if err := opsutils.Validate(&pb.Op{Op: op}); err != nil { return nil, err } name := fmt.Sprintf("exec %s", strings.Join(op.Exec.Meta.Args, " ")) - return &execOp{ + return &ExecOp{ op: op.Exec, mm: mounts.NewMountManager(name, cm, sm), cm: cm, @@ -63,6 +65,10 @@ func NewExecOp(v solver.Vertex, op *pb.Op_Exec, platform *pb.Platform, cm cache. }, nil } +func (e *ExecOp) Proto() *pb.ExecOp { + return e.op +} + func cloneExecOp(old *pb.ExecOp) pb.ExecOp { n := *old meta := *n.Meta @@ -80,7 +86,7 @@ func cloneExecOp(old *pb.ExecOp) pb.ExecOp { return n } -func (e *execOp) CacheMap(ctx context.Context, g session.Group, index int) (*solver.CacheMap, bool, error) { +func (e *ExecOp) CacheMap(ctx context.Context, g session.Group, index int) (*solver.CacheMap, bool, error) { op := cloneExecOp(e.op) for i := range op.Meta.ExtraHosts { h := op.Meta.ExtraHosts[i] @@ -157,9 +163,9 @@ func (e *execOp) CacheMap(ctx context.Context, g session.Group, index int) (*sol cm.Deps[i].Selector = digest.FromBytes(bytes.Join(dgsts, []byte{0})) } if !dep.NoContentBasedHash { - cm.Deps[i].ComputeDigestFunc = llbsolver.NewContentHashFunc(toSelectors(dedupePaths(dep.Selectors))) + cm.Deps[i].ComputeDigestFunc = opsutils.NewContentHashFunc(toSelectors(dedupePaths(dep.Selectors))) } - cm.Deps[i].PreprocessFunc = llbsolver.UnlazyResultFunc + cm.Deps[i].PreprocessFunc = unlazyResultFunc } return cm, true, nil @@ -189,10 +195,10 @@ func dedupePaths(inp []string) []string { return paths } -func toSelectors(p []string) []llbsolver.Selector { - sel := make([]llbsolver.Selector, 0, len(p)) +func toSelectors(p []string) 
[]opsutils.Selector { + sel := make([]opsutils.Selector, 0, len(p)) for _, p := range p { - sel = append(sel, llbsolver.Selector{Path: p, FollowLinks: true}) + sel = append(sel, opsutils.Selector{Path: p, FollowLinks: true}) } return sel } @@ -202,7 +208,7 @@ type dep struct { NoContentBasedHash bool } -func (e *execOp) getMountDeps() ([]dep, error) { +func (e *ExecOp) getMountDeps() ([]dep, error) { deps := make([]dep, e.numInputs) for _, m := range e.op.Mounts { if m.Input == pb.Empty { @@ -234,7 +240,7 @@ func addDefaultEnvvar(env []string, k, v string) []string { return append(env, k+"="+v) } -func (e *execOp) Exec(ctx context.Context, g session.Group, inputs []solver.Result) (results []solver.Result, err error) { +func (e *ExecOp) Exec(ctx context.Context, g session.Group, inputs []solver.Result) (results []solver.Result, err error) { trace.SpanFromContext(ctx).AddEvent("ExecOp started") refs := make([]*worker.WorkerRef, len(inputs)) @@ -393,7 +399,7 @@ func proxyEnvList(p *pb.ProxyEnv) []string { return out } -func (e *execOp) Acquire(ctx context.Context) (solver.ReleaseFunc, error) { +func (e *ExecOp) Acquire(ctx context.Context) (solver.ReleaseFunc, error) { if e.parallelism == nil { return func() {}, nil } @@ -406,7 +412,7 @@ func (e *execOp) Acquire(ctx context.Context) (solver.ReleaseFunc, error) { }, nil } -func (e *execOp) loadSecretEnv(ctx context.Context, g session.Group) ([]string, error) { +func (e *ExecOp) loadSecretEnv(ctx context.Context, g session.Group) ([]string, error) { secretenv := e.op.Secretenv if len(secretenv) == 0 { return nil, nil @@ -436,3 +442,6 @@ func (e *execOp) loadSecretEnv(ctx context.Context, g session.Group) ([]string, } return out, nil } + +func (e *ExecOp) IsProvenanceProvider() { +} diff --git a/solver/llbsolver/ops/file.go b/solver/llbsolver/ops/file.go index b1f3e0cfe474..7bbb3276797c 100644 --- a/solver/llbsolver/ops/file.go +++ b/solver/llbsolver/ops/file.go @@ -13,10 +13,10 @@ import ( 
"github.com/moby/buildkit/cache" "github.com/moby/buildkit/session" "github.com/moby/buildkit/solver" - "github.com/moby/buildkit/solver/llbsolver" "github.com/moby/buildkit/solver/llbsolver/errdefs" "github.com/moby/buildkit/solver/llbsolver/file" "github.com/moby/buildkit/solver/llbsolver/ops/fileoptypes" + "github.com/moby/buildkit/solver/llbsolver/ops/opsutils" "github.com/moby/buildkit/solver/pb" "github.com/moby/buildkit/util/flightcontrol" "github.com/moby/buildkit/worker" @@ -38,7 +38,7 @@ type fileOp struct { } func NewFileOp(v solver.Vertex, op *pb.Op_File, cm cache.Manager, parallelism *semaphore.Weighted, w worker.Worker) (solver.Op, error) { - if err := llbsolver.ValidateOp(&pb.Op{Op: op}); err != nil { + if err := opsutils.Validate(&pb.Op{Op: op}); err != nil { return nil, err } return &fileOp{ @@ -52,7 +52,7 @@ func NewFileOp(v solver.Vertex, op *pb.Op_File, cm cache.Manager, parallelism *s } func (f *fileOp) CacheMap(ctx context.Context, g session.Group, index int) (*solver.CacheMap, bool, error) { - selectors := map[int][]llbsolver.Selector{} + selectors := map[int][]opsutils.Selector{} invalidSelectors := map[int]struct{}{} actions := make([][]byte, 0, len(f.op.Actions)) @@ -149,10 +149,10 @@ func (f *fileOp) CacheMap(ctx context.Context, g session.Group, index int) (*sol }) cm.Deps[idx].Selector = digest.FromBytes(bytes.Join(dgsts, []byte{0})) - cm.Deps[idx].ComputeDigestFunc = llbsolver.NewContentHashFunc(dedupeSelectors(m)) + cm.Deps[idx].ComputeDigestFunc = opsutils.NewContentHashFunc(dedupeSelectors(m)) } for idx := range cm.Deps { - cm.Deps[idx].PreprocessFunc = llbsolver.UnlazyResultFunc + cm.Deps[idx].PreprocessFunc = unlazyResultFunc } return cm, true, nil @@ -194,8 +194,8 @@ func (f *fileOp) Acquire(ctx context.Context) (solver.ReleaseFunc, error) { }, nil } -func addSelector(m map[int][]llbsolver.Selector, idx int, sel string, wildcard, followLinks bool, includePatterns, excludePatterns []string) { - s := llbsolver.Selector{ +func 
addSelector(m map[int][]opsutils.Selector, idx int, sel string, wildcard, followLinks bool, includePatterns, excludePatterns []string) { + s := opsutils.Selector{ Path: sel, FollowLinks: followLinks, Wildcard: wildcard && containsWildcards(sel), @@ -219,7 +219,7 @@ func containsWildcards(name string) bool { return false } -func dedupeSelectors(m []llbsolver.Selector) []llbsolver.Selector { +func dedupeSelectors(m []opsutils.Selector) []opsutils.Selector { paths := make([]string, 0, len(m)) pathsFollow := make([]string, 0, len(m)) for _, sel := range m { @@ -233,13 +233,13 @@ func dedupeSelectors(m []llbsolver.Selector) []llbsolver.Selector { } paths = dedupePaths(paths) pathsFollow = dedupePaths(pathsFollow) - selectors := make([]llbsolver.Selector, 0, len(m)) + selectors := make([]opsutils.Selector, 0, len(m)) for _, p := range paths { - selectors = append(selectors, llbsolver.Selector{Path: p}) + selectors = append(selectors, opsutils.Selector{Path: p}) } for _, p := range pathsFollow { - selectors = append(selectors, llbsolver.Selector{Path: p, FollowLinks: true}) + selectors = append(selectors, opsutils.Selector{Path: p, FollowLinks: true}) } for _, sel := range m { @@ -255,7 +255,7 @@ func dedupeSelectors(m []llbsolver.Selector) []llbsolver.Selector { return selectors } -func processOwner(chopt *pb.ChownOpt, selectors map[int][]llbsolver.Selector) error { +func processOwner(chopt *pb.ChownOpt, selectors map[int][]opsutils.Selector) error { if chopt == nil { return nil } @@ -665,3 +665,14 @@ func isDefaultIndexes(idxs [][]int) bool { } return true } + +func unlazyResultFunc(ctx context.Context, res solver.Result, g session.Group) error { + ref, ok := res.Sys().(*worker.WorkerRef) + if !ok { + return errors.Errorf("invalid reference: %T", res) + } + if ref.ImmutableRef == nil { + return nil + } + return ref.ImmutableRef.Extract(ctx, g) +} diff --git a/solver/llbsolver/ops/merge.go b/solver/llbsolver/ops/merge.go index 38976f3fbe1b..db1b025bff40 100644 --- 
a/solver/llbsolver/ops/merge.go +++ b/solver/llbsolver/ops/merge.go @@ -10,7 +10,7 @@ import ( "github.com/moby/buildkit/cache" "github.com/moby/buildkit/session" "github.com/moby/buildkit/solver" - "github.com/moby/buildkit/solver/llbsolver" + "github.com/moby/buildkit/solver/llbsolver/ops/opsutils" "github.com/moby/buildkit/solver/pb" digest "github.com/opencontainers/go-digest" ) @@ -24,7 +24,7 @@ type mergeOp struct { } func NewMergeOp(v solver.Vertex, op *pb.Op_Merge, w worker.Worker) (solver.Op, error) { - if err := llbsolver.ValidateOp(&pb.Op{Op: op}); err != nil { + if err := opsutils.Validate(&pb.Op{Op: op}); err != nil { return nil, err } return &mergeOp{ diff --git a/solver/llbsolver/ops/opsutils/contenthash.go b/solver/llbsolver/ops/opsutils/contenthash.go new file mode 100644 index 000000000000..8bdd8f939e15 --- /dev/null +++ b/solver/llbsolver/ops/opsutils/contenthash.go @@ -0,0 +1,71 @@ +package opsutils + +import ( + "bytes" + "context" + "path" + + "github.com/moby/buildkit/cache/contenthash" + "github.com/moby/buildkit/session" + "github.com/moby/buildkit/solver" + "github.com/moby/buildkit/worker" + digest "github.com/opencontainers/go-digest" + "github.com/pkg/errors" + "golang.org/x/sync/errgroup" +) + +type Selector struct { + Path string + Wildcard bool + FollowLinks bool + IncludePatterns []string + ExcludePatterns []string +} + +func (sel Selector) HasWildcardOrFilters() bool { + return sel.Wildcard || len(sel.IncludePatterns) != 0 || len(sel.ExcludePatterns) != 0 +} + +func NewContentHashFunc(selectors []Selector) solver.ResultBasedCacheFunc { + return func(ctx context.Context, res solver.Result, s session.Group) (digest.Digest, error) { + ref, ok := res.Sys().(*worker.WorkerRef) + if !ok { + return "", errors.Errorf("invalid reference: %T", res) + } + + if len(selectors) == 0 { + selectors = []Selector{{}} + } + + dgsts := make([][]byte, len(selectors)) + + eg, ctx := errgroup.WithContext(ctx) + + for i, sel := range selectors { + i, sel 
:= i, sel + eg.Go(func() error { + dgst, err := contenthash.Checksum( + ctx, ref.ImmutableRef, path.Join("/", sel.Path), + contenthash.ChecksumOpts{ + Wildcard: sel.Wildcard, + FollowLinks: sel.FollowLinks, + IncludePatterns: sel.IncludePatterns, + ExcludePatterns: sel.ExcludePatterns, + }, + s, + ) + if err != nil { + return errors.Wrapf(err, "failed to calculate checksum of ref %s", ref.ID()) + } + dgsts[i] = []byte(dgst) + return nil + }) + } + + if err := eg.Wait(); err != nil { + return "", err + } + + return digest.FromBytes(bytes.Join(dgsts, []byte{0})), nil + } +} diff --git a/solver/llbsolver/ops/opsutils/validate.go b/solver/llbsolver/ops/opsutils/validate.go new file mode 100644 index 000000000000..8e0d30d9ecf3 --- /dev/null +++ b/solver/llbsolver/ops/opsutils/validate.go @@ -0,0 +1,63 @@ +package opsutils + +import ( + "github.com/moby/buildkit/solver/pb" + "github.com/pkg/errors" +) + +func Validate(op *pb.Op) error { + if op == nil { + return errors.Errorf("invalid nil op") + } + + switch op := op.Op.(type) { + case *pb.Op_Source: + if op.Source == nil { + return errors.Errorf("invalid nil source op") + } + case *pb.Op_Exec: + if op.Exec == nil { + return errors.Errorf("invalid nil exec op") + } + if op.Exec.Meta == nil { + return errors.Errorf("invalid exec op with no meta") + } + if len(op.Exec.Meta.Args) == 0 { + return errors.Errorf("invalid exec op with no args") + } + if len(op.Exec.Mounts) == 0 { + return errors.Errorf("invalid exec op with no mounts") + } + + isRoot := false + for _, m := range op.Exec.Mounts { + if m.Dest == pb.RootMount { + isRoot = true + break + } + } + if !isRoot { + return errors.Errorf("invalid exec op with no rootfs") + } + case *pb.Op_File: + if op.File == nil { + return errors.Errorf("invalid nil file op") + } + if len(op.File.Actions) == 0 { + return errors.Errorf("invalid file op with no actions") + } + case *pb.Op_Build: + if op.Build == nil { + return errors.Errorf("invalid nil build op") + } + case *pb.Op_Merge: 
+ if op.Merge == nil { + return errors.Errorf("invalid nil merge op") + } + case *pb.Op_Diff: + if op.Diff == nil { + return errors.Errorf("invalid nil diff op") + } + } + return nil +} diff --git a/solver/llbsolver/ops/source.go b/solver/llbsolver/ops/source.go index d24a902da570..fabd300d4b5c 100644 --- a/solver/llbsolver/ops/source.go +++ b/solver/llbsolver/ops/source.go @@ -7,7 +7,7 @@ import ( "github.com/moby/buildkit/session" "github.com/moby/buildkit/solver" - "github.com/moby/buildkit/solver/llbsolver" + "github.com/moby/buildkit/solver/llbsolver/ops/opsutils" "github.com/moby/buildkit/solver/pb" "github.com/moby/buildkit/source" "github.com/moby/buildkit/worker" @@ -17,7 +17,7 @@ import ( const sourceCacheType = "buildkit.source.v0" -type sourceOp struct { +type SourceOp struct { mu sync.Mutex op *pb.Op_Source platform *pb.Platform @@ -27,13 +27,17 @@ type sourceOp struct { w worker.Worker vtx solver.Vertex parallelism *semaphore.Weighted + pin string + id source.Identifier } -func NewSourceOp(vtx solver.Vertex, op *pb.Op_Source, platform *pb.Platform, sm *source.Manager, parallelism *semaphore.Weighted, sessM *session.Manager, w worker.Worker) (solver.Op, error) { - if err := llbsolver.ValidateOp(&pb.Op{Op: op}); err != nil { +var _ solver.Op = &SourceOp{} + +func NewSourceOp(vtx solver.Vertex, op *pb.Op_Source, platform *pb.Platform, sm *source.Manager, parallelism *semaphore.Weighted, sessM *session.Manager, w worker.Worker) (*SourceOp, error) { + if err := opsutils.Validate(&pb.Op{Op: op}); err != nil { return nil, err } - return &sourceOp{ + return &SourceOp{ op: op, sm: sm, w: w, @@ -44,7 +48,13 @@ func NewSourceOp(vtx solver.Vertex, op *pb.Op_Source, platform *pb.Platform, sm }, nil } -func (s *sourceOp) instance(ctx context.Context) (source.SourceInstance, error) { +func (s *SourceOp) IsProvenanceProvider() {} + +func (s *SourceOp) Pin() (source.Identifier, string) { + return s.id, s.pin +} + +func (s *SourceOp) instance(ctx context.Context) 
(source.SourceInstance, error) { s.mu.Lock() defer s.mu.Unlock() if s.src != nil { @@ -59,10 +69,11 @@ func (s *sourceOp) instance(ctx context.Context) (source.SourceInstance, error) return nil, err } s.src = src + s.id = id return s.src, nil } -func (s *sourceOp) CacheMap(ctx context.Context, g session.Group, index int) (*solver.CacheMap, bool, error) { +func (s *SourceOp) CacheMap(ctx context.Context, g session.Group, index int) (*solver.CacheMap, bool, error) { src, err := s.instance(ctx) if err != nil { return nil, false, err @@ -73,25 +84,23 @@ func (s *sourceOp) CacheMap(ctx context.Context, g session.Group, index int) (*s return nil, false, err } + if s.pin == "" { + s.pin = pin + } + dgst := digest.FromBytes([]byte(sourceCacheType + ":" + k)) if strings.HasPrefix(k, "session:") { dgst = digest.Digest("random:" + strings.TrimPrefix(dgst.String(), dgst.Algorithm().String()+":")) } - var buildSources map[string]string - if !strings.HasPrefix(s.op.Source.GetIdentifier(), "local://") { - buildSources = map[string]string{s.op.Source.GetIdentifier(): pin} - } - return &solver.CacheMap{ // TODO: add os/arch - Digest: dgst, - Opts: cacheOpts, - BuildSources: buildSources, + Digest: dgst, + Opts: cacheOpts, }, done, nil } -func (s *sourceOp) Exec(ctx context.Context, g session.Group, _ []solver.Result) (outputs []solver.Result, err error) { +func (s *SourceOp) Exec(ctx context.Context, g session.Group, _ []solver.Result) (outputs []solver.Result, err error) { src, err := s.instance(ctx) if err != nil { return nil, err @@ -103,7 +112,7 @@ func (s *sourceOp) Exec(ctx context.Context, g session.Group, _ []solver.Result) return []solver.Result{worker.NewWorkerRefResult(ref, s.w)}, nil } -func (s *sourceOp) Acquire(ctx context.Context) (solver.ReleaseFunc, error) { +func (s *SourceOp) Acquire(ctx context.Context) (solver.ReleaseFunc, error) { if s.parallelism == nil { return func() {}, nil } diff --git a/solver/llbsolver/proc/provenance.go 
b/solver/llbsolver/proc/provenance.go index 3a1d19f31af3..4186082dc7e6 100644 --- a/solver/llbsolver/proc/provenance.go +++ b/solver/llbsolver/proc/provenance.go @@ -13,24 +13,20 @@ import ( "github.com/moby/buildkit/cache/config" "github.com/moby/buildkit/exporter/containerimage" "github.com/moby/buildkit/exporter/containerimage/exptypes" - "github.com/moby/buildkit/frontend" gatewaypb "github.com/moby/buildkit/frontend/gateway/pb" "github.com/moby/buildkit/identity" "github.com/moby/buildkit/solver" "github.com/moby/buildkit/solver/llbsolver" + "github.com/moby/buildkit/solver/llbsolver/provenance" "github.com/moby/buildkit/solver/result" - binfotypes "github.com/moby/buildkit/util/buildinfo/types" - provenance "github.com/moby/buildkit/util/provenance" "github.com/moby/buildkit/worker" digest "github.com/opencontainers/go-digest" ocispecs "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" ) -var BuildKitBuildType = "https://mobyproject.org/buildkit@v1" - func ProvenanceProcessor(attrs map[string]string) llbsolver.Processor { - return func(ctx context.Context, res *frontend.Result, s *llbsolver.Solver, j *solver.Job) (*frontend.Result, error) { + return func(ctx context.Context, res *llbsolver.Result, s *llbsolver.Solver, j *solver.Job) (*llbsolver.Result, error) { if len(res.Refs) == 0 { return nil, errors.New("provided result has no refs") } @@ -43,7 +39,7 @@ func ProvenanceProcessor(attrs map[string]string) llbsolver.Processor { var ps exptypes.Platforms if len(platformsBytes) > 0 { if err := json.Unmarshal(platformsBytes, &ps); err != nil { - return nil, errors.Wrapf(err, "failed to parse platforms passed to sbom processor") + return nil, errors.Wrapf(err, "failed to parse platforms passed to provenance processor") } } @@ -73,17 +69,12 @@ func ProvenanceProcessor(attrs map[string]string) llbsolver.Processor { } for _, p := range ps.Platforms { - dt, ok := res.Metadata[exptypes.ExporterBuildInfo+"/"+p.ID] + cp, ok := 
res.Provenance.Refs[p.ID] if !ok { return nil, errors.New("no build info found for provenance") } - var bi binfotypes.BuildInfo - if err := json.Unmarshal(dt, &bi); err != nil { - return nil, errors.Wrap(err, "failed to parse build info") - } - - pr, err := provenance.FromBuildInfo(bi) + pr, err := provenance.NewPredicate(cp) if err != nil { return nil, err } @@ -97,15 +88,17 @@ func ProvenanceProcessor(attrs map[string]string) llbsolver.Processor { var addLayers func() error if mode != "max" { - param := make(map[string]*string) - for k, v := range pr.Invocation.Parameters.(map[string]*string) { + args := make(map[string]string) + for k, v := range pr.Invocation.Parameters.Args { if strings.HasPrefix(k, "build-arg:") || strings.HasPrefix(k, "label:") { pr.Metadata.Completeness.Parameters = false continue } - param[k] = v + args[k] = v } - pr.Invocation.Parameters = param + pr.Invocation.Parameters.Args = args + pr.Invocation.Parameters.Secrets = nil + pr.Invocation.Parameters.SSH = nil } else { dgsts, err := provenance.AddBuildConfig(ctx, pr, res.Refs[p.ID]) if err != nil { @@ -139,7 +132,11 @@ func ProvenanceProcessor(attrs map[string]string) llbsolver.Processor { } if len(m) != 0 { - pr.Layers = m + if pr.Metadata == nil { + pr.Metadata = &provenance.ProvenanceMetadata{} + } + + pr.Metadata.BuildKitMetadata.Layers = m } return nil diff --git a/solver/llbsolver/proc/refs.go b/solver/llbsolver/proc/refs.go index 9b6475b77e9b..96ffa211c1f1 100644 --- a/solver/llbsolver/proc/refs.go +++ b/solver/llbsolver/proc/refs.go @@ -8,9 +8,9 @@ import ( "github.com/containerd/containerd/platforms" "github.com/moby/buildkit/exporter/containerimage/exptypes" - "github.com/moby/buildkit/frontend" "github.com/moby/buildkit/solver" "github.com/moby/buildkit/solver/llbsolver" + "github.com/moby/buildkit/solver/llbsolver/provenance" ocispecs "github.com/opencontainers/image-spec/specs-go/v1" ) @@ -19,7 +19,7 @@ import ( // // This is useful for cases where a frontend produces a 
single-platform image, // but we need to add additional Refs to it (e.g. attestations). -func ForceRefsProcessor(ctx context.Context, result *frontend.Result, s *llbsolver.Solver, j *solver.Job) (*frontend.Result, error) { +func ForceRefsProcessor(ctx context.Context, result *llbsolver.Result, s *llbsolver.Solver, j *solver.Job) (*llbsolver.Result, error) { if len(result.Refs) > 0 { return result, nil } @@ -72,5 +72,10 @@ func ForceRefsProcessor(ctx context.Context, result *frontend.Result, s *llbsolv } result.AddMeta(exptypes.ExporterPlatformsKey, dt) + result.Provenance.Refs = map[string]*provenance.Capture{ + pk: result.Provenance.Ref, + } + result.Provenance.Ref = nil + return result, nil } diff --git a/solver/llbsolver/proc/sbom.go b/solver/llbsolver/proc/sbom.go index 3b66f7693a24..c055fed1f85c 100644 --- a/solver/llbsolver/proc/sbom.go +++ b/solver/llbsolver/proc/sbom.go @@ -14,9 +14,9 @@ import ( ) func SBOMProcessor(scannerRef string) llbsolver.Processor { - return func(ctx context.Context, res *frontend.Result, s *llbsolver.Solver, j *solver.Job) (*frontend.Result, error) { + return func(ctx context.Context, res *llbsolver.Result, s *llbsolver.Solver, j *solver.Job) (*llbsolver.Result, error) { // skip sbom generation if we already have an sbom - if attest.HasSBOM(res) { + if attest.HasSBOM(res.Result) { return res, nil } @@ -61,7 +61,7 @@ func SBOMProcessor(scannerRef string) llbsolver.Processor { return nil, err } - r, err := s.Bridge(j).Solve(ctx, frontend.SolveRequest{ + r, err := s.Bridge(j).Solve(ctx, frontend.SolveRequest{ // TODO: buildinfo Definition: def.ToPB(), }, j.SessionID) if err != nil { diff --git a/solver/llbsolver/provenance.go b/solver/llbsolver/provenance.go new file mode 100644 index 000000000000..923703021cdf --- /dev/null +++ b/solver/llbsolver/provenance.go @@ -0,0 +1,342 @@ +package llbsolver + +import ( + "context" + "encoding/json" + "sync" + + "github.com/containerd/containerd/platforms" + "github.com/moby/buildkit/client/llb" 
+ "github.com/moby/buildkit/exporter/containerimage/exptypes" + "github.com/moby/buildkit/frontend" + "github.com/moby/buildkit/solver" + "github.com/moby/buildkit/solver/llbsolver/ops" + "github.com/moby/buildkit/solver/llbsolver/provenance" + "github.com/moby/buildkit/solver/pb" + "github.com/moby/buildkit/source" + digest "github.com/opencontainers/go-digest" + ocispecs "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" +) + +type resultWithBridge struct { + res *frontend.Result + bridge *provenanceBridge +} + +// provenanceBridge provides scoped access to LLBBridge and captures the request it makes for provenance +type provenanceBridge struct { + *llbBridge + mu sync.Mutex + req *frontend.SolveRequest + + images []provenance.ImageSource + builds []resultWithBridge + subBridges []*provenanceBridge +} + +func (b *provenanceBridge) eachRef(f func(r solver.ResultProxy) error) error { + for _, b := range b.builds { + if err := b.res.EachRef(f); err != nil { + return err + } + } + for _, b := range b.subBridges { + if err := b.eachRef(f); err != nil { + return err + } + } + return nil +} + +func (b *provenanceBridge) allImages() []provenance.ImageSource { + res := make([]provenance.ImageSource, 0, len(b.images)) + res = append(res, b.images...) + for _, sb := range b.subBridges { + res = append(res, sb.allImages()...) 
+ } + return res +} + +func (b *provenanceBridge) requests(r *frontend.Result) (*resultRequests, error) { + reqs := &resultRequests{refs: make(map[string]*resultWithBridge)} + + if r.Ref != nil { + ref, ok := b.findByResult(r.Ref) + if !ok { + return nil, errors.Errorf("could not find request for ref %s", r.Ref.ID()) + } + reqs.ref = ref + } + + for k, ref := range r.Refs { + r, ok := b.findByResult(ref) + if !ok { + return nil, errors.Errorf("could not find request for ref %s", ref.ID()) + } + reqs.refs[k] = r + } + + if platformsBytes, ok := r.Metadata[exptypes.ExporterPlatformsKey]; ok { + var ps exptypes.Platforms + if len(platformsBytes) > 0 { + if err := json.Unmarshal(platformsBytes, &ps); err != nil { + return nil, errors.Wrapf(err, "failed to parse platforms passed to provenance processor") + } + reqs.platforms = ps.Platforms + } + } + + return reqs, nil +} + +func (b *provenanceBridge) findByResult(rp solver.ResultProxy) (*resultWithBridge, bool) { + for _, br := range b.subBridges { + if req, ok := br.findByResult(rp); ok { + return req, true + } + } + for _, bld := range b.builds { + if bld.res.Ref != nil { + if bld.res.Ref.ID() == rp.ID() { + return &bld, true + } + } + for _, ref := range bld.res.Refs { + if ref.ID() == rp.ID() { + return &bld, true + } + } + } + return nil, false +} + +func (b *provenanceBridge) ResolveImageConfig(ctx context.Context, ref string, opt llb.ResolveImageConfigOpt) (dgst digest.Digest, config []byte, err error) { + dgst, config, err = b.llbBridge.ResolveImageConfig(ctx, ref, opt) + if err != nil { + return "", nil, err + } + + b.images = append(b.images, provenance.ImageSource{ + Ref: ref, + Platform: opt.Platform, + Digest: dgst, + }) + return dgst, config, nil +} + +func (b *provenanceBridge) Solve(ctx context.Context, req frontend.SolveRequest, sid string) (res *frontend.Result, err error) { + if req.Definition != nil && req.Definition.Def != nil && req.Frontend != "" { + return nil, errors.New("cannot solve with both 
Definition and Frontend specified") + } + + if req.Definition != nil && req.Definition.Def != nil { + rp := newResultProxy(b, req) + res = &frontend.Result{Ref: rp} + b.mu.Lock() + b.builds = append(b.builds, resultWithBridge{res: res, bridge: b}) + b.mu.Unlock() + } else if req.Frontend != "" { + f, ok := b.llbBridge.frontends[req.Frontend] + if !ok { + return nil, errors.Errorf("invalid frontend: %s", req.Frontend) + } + wb := &provenanceBridge{llbBridge: b.llbBridge, req: &req} + res, err = f.Solve(ctx, wb, req.FrontendOpt, req.FrontendInputs, sid, b.llbBridge.sm) + if err != nil { + return nil, err + } + wb.builds = append(wb.builds, resultWithBridge{res: res, bridge: wb}) + b.mu.Lock() + b.subBridges = append(b.subBridges, wb) + b.mu.Unlock() + } else { + return &frontend.Result{}, nil + } + if req.Evaluate { + err = res.EachRef(func(ref solver.ResultProxy) error { + _, err := res.Ref.Result(ctx) + return err + }) + } + return +} + +type resultRequests struct { + ref *resultWithBridge + refs map[string]*resultWithBridge + platforms []exptypes.Platform +} + +// filterImagePlatforms filter out images that not for the current platform if an image existist for every platform in a result +func (reqs *resultRequests) filterImagePlatforms(k string, imgs []provenance.ImageSource) []provenance.ImageSource { + if len(reqs.platforms) == 0 { + return imgs + } + m := map[string]string{} + for _, img := range imgs { + if _, ok := m[img.Ref]; ok { + continue + } + hasPlatform := true + for _, p := range reqs.platforms { + matcher := platforms.NewMatcher(p.Platform) + found := false + for _, img2 := range imgs { + if img.Ref == img2.Ref && img2.Platform != nil { + if matcher.Match(*img2.Platform) { + found = true + break + } + } + } + if !found { + hasPlatform = false + break + } + } + if hasPlatform { + m[img.Ref] = img.Ref + } + } + + var current ocispecs.Platform + for _, p := range reqs.platforms { + if p.ID == k { + current = p.Platform + } + } + + out := 
make([]provenance.ImageSource, 0, len(imgs)) + for _, img := range imgs { + if _, ok := m[img.Ref]; ok && img.Platform != nil { + if current.OS == img.Platform.OS && current.Architecture == img.Platform.Architecture { + out = append(out, img) + } + } else { + out = append(out, img) + } + } + return out +} + +func (reqs *resultRequests) allRes() map[string]struct{} { + res := make(map[string]struct{}) + if reqs.ref != nil { + res[reqs.ref.res.Ref.ID()] = struct{}{} + } + for _, r := range reqs.refs { + res[r.res.Ref.ID()] = struct{}{} + } + return res +} + +func captureProvenance(ctx context.Context, res solver.CachedResultWithProvenance) (*provenance.Capture, error) { + if res == nil { + return nil, nil + } + c := &provenance.Capture{} + + err := res.WalkProvenance(ctx, func(pp solver.ProvenanceProvider) error { + switch op := pp.(type) { + case *ops.SourceOp: + id, pin := op.Pin() + switch s := id.(type) { + case *source.ImageIdentifier: + dgst, err := digest.Parse(pin) + if err != nil { + return errors.Wrapf(err, "failed to parse image digest %s", pin) + } + c.AddImage(provenance.ImageSource{ + Ref: s.Reference.String(), + Platform: s.Platform, + Digest: dgst, + }) + case *source.LocalIdentifier: + c.AddLocal(provenance.LocalSource{ + Name: s.Name, + }) + case *source.GitIdentifier: + url := s.Remote + if s.Ref != "" { + url += "#" + s.Ref + } + c.AddGit(provenance.GitSource{ + URL: url, + Commit: pin, + }) + if s.AuthTokenSecret != "" { + c.AddSecret(provenance.Secret{ + ID: s.AuthTokenSecret, + Optional: true, + }) + } + if s.AuthHeaderSecret != "" { + c.AddSecret(provenance.Secret{ + ID: s.AuthHeaderSecret, + Optional: true, + }) + } + if s.MountSSHSock != "" { + c.AddSSH(provenance.SSH{ + ID: s.MountSSHSock, + Optional: true, + }) + } + case *source.HTTPIdentifier: + dgst, err := digest.Parse(pin) + if err != nil { + return errors.Wrapf(err, "failed to parse HTTP digest %s", pin) + } + c.AddHTTP(provenance.HTTPSource{ + URL: s.URL, + Digest: dgst, + }) + case 
*source.OCIIdentifier: + dgst, err := digest.Parse(pin) + if err != nil { + return errors.Wrapf(err, "failed to parse OCI digest %s", pin) + } + c.AddLocalImage(provenance.ImageSource{ + Ref: s.Name, + Platform: s.Platform, + Digest: dgst, + }) + default: + return errors.Errorf("unknown source identifier %T", id) + } + case *ops.ExecOp: + pr := op.Proto() + for _, m := range pr.Mounts { + if m.MountType == pb.MountType_SECRET { + c.AddSecret(provenance.Secret{ + ID: m.SecretOpt.GetID(), + Optional: m.SecretOpt.GetOptional(), + }) + } + if m.MountType == pb.MountType_SSH { + c.AddSSH(provenance.SSH{ + ID: m.SSHOpt.GetID(), + Optional: m.SSHOpt.GetOptional(), + }) + } + } + for _, se := range pr.Secretenv { + c.AddSecret(provenance.Secret{ + ID: se.GetID(), + Optional: se.GetOptional(), + }) + } + if pr.Network != pb.NetMode_NONE { + c.NetworkAccess = true + } + case *ops.BuildOp: + c.IncompleteMaterials = true // not supported yet + } + return nil + }) + if err != nil { + return nil, err + } + return c, err +} diff --git a/util/provenance/buildconfig.go b/solver/llbsolver/provenance/buildconfig.go similarity index 95% rename from util/provenance/buildconfig.go rename to solver/llbsolver/provenance/buildconfig.go index 90e7cd05279a..26148fb99b5d 100644 --- a/util/provenance/buildconfig.go +++ b/solver/llbsolver/provenance/buildconfig.go @@ -15,7 +15,7 @@ type BuildConfig struct { } type BuildStep struct { - ID string `json:"ID,omitempty"` + ID string `json:"id,omitempty"` Op interface{} `json:"op,omitempty"` Inputs []string `json:"inputs,omitempty"` } @@ -69,7 +69,10 @@ func AddBuildConfig(ctx context.Context, p *ProvenancePredicate, rp solver.Resul locs[fmt.Sprintf("step%d", idx)] = l } - p.Source = &Source{ + if p.Metadata == nil { + p.Metadata = &ProvenanceMetadata{} + } + p.Metadata.BuildKitMetadata.Source = &Source{ Infos: sis, Locations: locs, } diff --git a/solver/llbsolver/provenance/capture.go b/solver/llbsolver/provenance/capture.go new file mode 100644 
index 000000000000..3d94e8f5c2f3 --- /dev/null +++ b/solver/llbsolver/provenance/capture.go @@ -0,0 +1,250 @@ +package provenance + +import ( + "sort" + + distreference "github.com/docker/distribution/reference" + digest "github.com/opencontainers/go-digest" + ocispecs "github.com/opencontainers/image-spec/specs-go/v1" +) + +type Result struct { + Ref *Capture + Refs map[string]*Capture +} + +type ImageSource struct { + Ref string + Platform *ocispecs.Platform + Digest digest.Digest +} + +type GitSource struct { + URL string + Commit string +} + +type HTTPSource struct { + URL string + Digest digest.Digest +} + +type LocalSource struct { + Name string `json:"name"` +} + +type Secret struct { + ID string `json:"id"` + Optional bool `json:"optional,omitempty"` +} + +type SSH struct { + ID string `json:"id"` + Optional bool `json:"optional,omitempty"` +} + +type Sources struct { + Images []ImageSource + LocalImages []ImageSource + Git []GitSource + HTTP []HTTPSource + Local []LocalSource +} + +type Capture struct { + Frontend string + Args map[string]string + Sources Sources + Secrets []Secret + SSH []SSH + NetworkAccess bool + IncompleteMaterials bool +} + +func (c *Capture) Merge(c2 *Capture) error { + if c2 == nil { + return nil + } + for _, i := range c2.Sources.Images { + c.AddImage(i) + } + for _, i := range c2.Sources.LocalImages { + c.AddLocalImage(i) + } + for _, l := range c2.Sources.Local { + c.AddLocal(l) + } + for _, g := range c2.Sources.Git { + c.AddGit(g) + } + for _, h := range c2.Sources.HTTP { + c.AddHTTP(h) + } + for _, s := range c2.Secrets { + c.AddSecret(s) + } + for _, s := range c2.SSH { + c.AddSSH(s) + } + if c2.NetworkAccess { + c.NetworkAccess = true + } + if c2.IncompleteMaterials { + c.IncompleteMaterials = true + } + return nil +} + +func (c *Capture) Sort() { + sort.Slice(c.Sources.Images, func(i, j int) bool { + return c.Sources.Images[i].Ref < c.Sources.Images[j].Ref + }) + sort.Slice(c.Sources.LocalImages, func(i, j int) bool { + 
return c.Sources.LocalImages[i].Ref < c.Sources.LocalImages[j].Ref + }) + sort.Slice(c.Sources.Local, func(i, j int) bool { + return c.Sources.Local[i].Name < c.Sources.Local[j].Name + }) + sort.Slice(c.Sources.Git, func(i, j int) bool { + return c.Sources.Git[i].URL < c.Sources.Git[j].URL + }) + sort.Slice(c.Sources.HTTP, func(i, j int) bool { + return c.Sources.HTTP[i].URL < c.Sources.HTTP[j].URL + }) + sort.Slice(c.Secrets, func(i, j int) bool { + return c.Secrets[i].ID < c.Secrets[j].ID + }) + sort.Slice(c.SSH, func(i, j int) bool { + return c.SSH[i].ID < c.SSH[j].ID + }) +} + +// OptimizeImageSources filters out image sources by digest reference if same digest +// is already present by a tag reference. +func (c *Capture) OptimizeImageSources() error { + m := map[string]struct{}{} + for _, i := range c.Sources.Images { + ref, nameTag, err := parseRefName(i.Ref) + if err != nil { + return err + } + if _, ok := ref.(distreference.Canonical); !ok { + m[nameTag] = struct{}{} + } + } + + images := make([]ImageSource, 0, len(c.Sources.Images)) + for _, i := range c.Sources.Images { + ref, nameTag, err := parseRefName(i.Ref) + if err != nil { + return err + } + if _, ok := ref.(distreference.Canonical); ok { + if _, ok := m[nameTag]; ok { + continue + } + } + images = append(images, i) + } + c.Sources.Images = images + return nil +} + +func (c *Capture) AddImage(i ImageSource) { + for _, v := range c.Sources.Images { + if v.Ref == i.Ref { + if v.Platform == i.Platform { + return + } + if v.Platform != nil && i.Platform != nil { + if v.Platform.Architecture == i.Platform.Architecture && v.Platform.OS == i.Platform.OS && v.Platform.Variant == i.Platform.Variant { + return + } + } + } + } + c.Sources.Images = append(c.Sources.Images, i) +} + +func (c *Capture) AddLocalImage(i ImageSource) { + for _, v := range c.Sources.LocalImages { + if v.Ref == i.Ref { + if v.Platform == i.Platform { + return + } + if v.Platform != nil && i.Platform != nil { + if 
v.Platform.Architecture == i.Platform.Architecture && v.Platform.OS == i.Platform.OS && v.Platform.Variant == i.Platform.Variant { + return + } + } + } + } + c.Sources.LocalImages = append(c.Sources.LocalImages, i) +} + +func (c *Capture) AddLocal(l LocalSource) { + for _, v := range c.Sources.Local { + if v.Name == l.Name { + return + } + } + c.Sources.Local = append(c.Sources.Local, l) +} + +func (c *Capture) AddGit(g GitSource) { + for _, v := range c.Sources.Git { + if v.URL == g.URL { + return + } + } + c.Sources.Git = append(c.Sources.Git, g) +} + +func (c *Capture) AddHTTP(h HTTPSource) { + for _, v := range c.Sources.HTTP { + if v.URL == h.URL { + return + } + } + c.Sources.HTTP = append(c.Sources.HTTP, h) +} + +func (c *Capture) AddSecret(s Secret) { + for i, v := range c.Secrets { + if v.ID == s.ID { + if !s.Optional { + c.Secrets[i].Optional = false + } + return + } + } + c.Secrets = append(c.Secrets, s) +} + +func (c *Capture) AddSSH(s SSH) { + if s.ID == "" { + s.ID = "default" + } + for i, v := range c.SSH { + if v.ID == s.ID { + if !s.Optional { + c.SSH[i].Optional = false + } + return + } + } + c.SSH = append(c.SSH, s) +} + +func parseRefName(s string) (distreference.Named, string, error) { + ref, err := distreference.ParseNormalizedNamed(s) + if err != nil { + return nil, "", err + } + name := ref.Name() + tag := "latest" + if r, ok := ref.(distreference.Tagged); ok { + tag = r.Tag() + } + return ref, name + ":" + tag, nil +} diff --git a/solver/llbsolver/provenance/predicate.go b/solver/llbsolver/provenance/predicate.go new file mode 100644 index 000000000000..8e5593e62550 --- /dev/null +++ b/solver/llbsolver/provenance/predicate.go @@ -0,0 +1,248 @@ +package provenance + +import ( + "strings" + + "github.com/containerd/containerd/platforms" + slsa "github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v0.2" + "github.com/moby/buildkit/util/purl" + ocispecs "github.com/opencontainers/image-spec/specs-go/v1" + 
"github.com/package-url/packageurl-go" +) + +const ( + BuildKitBuildType = "https://mobyproject.org/buildkit@v1" +) + +type ProvenancePredicate struct { + slsa.ProvenancePredicate + Invocation ProvenanceInvocation `json:"invocation,omitempty"` + BuildConfig *BuildConfig `json:"buildConfig,omitempty"` + Metadata *ProvenanceMetadata `json:"metadata,omitempty"` +} + +type ProvenanceInvocation struct { + ConfigSource slsa.ConfigSource `json:"configSource,omitempty"` + Parameters Parameters `json:"parameters,omitempty"` + Environment Environment `json:"environment,omitempty"` +} + +type Parameters struct { + Frontend string `json:"frontend,omitempty"` + Args map[string]string `json:"args,omitempty"` + Secrets []*Secret `json:"secrets,omitempty"` + SSH []*SSH `json:"ssh,omitempty"` + Locals []*LocalSource `json:"locals,omitempty"` + // TODO: select export attributes + // TODO: frontend inputs +} + +type Environment struct { + Platform string `json:"platform"` +} + +type ProvenanceMetadata struct { + slsa.ProvenanceMetadata + Completeness ProvenanceComplete `json:"completeness"` + BuildKitMetadata BuildKitMetadata `json:"https://mobyproject.org/buildkit@v1#metadata,omitempty"` +} + +type ProvenanceComplete struct { + slsa.ProvenanceComplete + Hermetic bool `json:"https://mobyproject.org/buildkit@v1#hermetic,omitempty"` +} + +type BuildKitMetadata struct { + VCS map[string]string `json:"vcs,omitempty"` + Source *Source `json:"source,omitempty"` + Layers map[string][][]ocispecs.Descriptor `json:"layers,omitempty"` +} + +func slsaMaterials(srcs Sources) ([]slsa.ProvenanceMaterial, error) { + count := len(srcs.Images) + len(srcs.Git) + len(srcs.HTTP) + len(srcs.LocalImages) + out := make([]slsa.ProvenanceMaterial, 0, count) + + for _, s := range srcs.Images { + uri, err := purl.RefToPURL(s.Ref, s.Platform) + if err != nil { + return nil, err + } + out = append(out, slsa.ProvenanceMaterial{ + URI: uri, + Digest: slsa.DigestSet{ + s.Digest.Algorithm().String(): s.Digest.Hex(), 
+ }, + }) + } + + for _, s := range srcs.Git { + out = append(out, slsa.ProvenanceMaterial{ + URI: s.URL, + Digest: slsa.DigestSet{ + "sha1": s.Commit, + }, + }) + } + + for _, s := range srcs.HTTP { + out = append(out, slsa.ProvenanceMaterial{ + URI: s.URL, + Digest: slsa.DigestSet{ + s.Digest.Algorithm().String(): s.Digest.Hex(), + }, + }) + } + + for _, s := range srcs.LocalImages { + q := []packageurl.Qualifier{} + if s.Platform != nil { + q = append(q, packageurl.Qualifier{ + Key: "platform", + Value: platforms.Format(*s.Platform), + }) + } + packageurl.NewPackageURL(packageurl.TypeOCI, "", s.Ref, "", q, "") + out = append(out, slsa.ProvenanceMaterial{ + URI: s.Ref, + Digest: slsa.DigestSet{ + s.Digest.Algorithm().String(): s.Digest.Hex(), + }, + }) + } + return out, nil +} + +func findMaterial(srcs Sources, uri string) (*slsa.ProvenanceMaterial, bool) { + for _, s := range srcs.Git { + if s.URL == uri { + return &slsa.ProvenanceMaterial{ + URI: s.URL, + Digest: slsa.DigestSet{ + "sha1": s.Commit, + }, + }, true + } + } + for _, s := range srcs.HTTP { + if s.URL == uri { + return &slsa.ProvenanceMaterial{ + URI: s.URL, + Digest: slsa.DigestSet{ + s.Digest.Algorithm().String(): s.Digest.Hex(), + }, + }, true + } + } + return nil, false +} + +func NewPredicate(c *Capture) (*ProvenancePredicate, error) { + materials, err := slsaMaterials(c.Sources) + if err != nil { + return nil, err + } + inv := ProvenanceInvocation{} + + contextKey := "context" + if v, ok := c.Args["contextkey"]; ok && v != "" { + contextKey = v + } + + if v, ok := c.Args[contextKey]; ok && v != "" { + if m, ok := findMaterial(c.Sources, v); ok { + inv.ConfigSource.URI = m.URI + inv.ConfigSource.Digest = m.Digest + } else { + inv.ConfigSource.URI = v + } + delete(c.Args, contextKey) + } + + if v, ok := c.Args["filename"]; ok && v != "" { + inv.ConfigSource.EntryPoint = v + delete(c.Args, "filename") + } + + vcs := make(map[string]string) + for k, v := range c.Args { + if strings.HasPrefix(k, 
"vcs:") { + delete(c.Args, k) + if v != "" { + vcs[strings.TrimPrefix(k, "vcs:")] = v + } + } + } + + inv.Environment.Platform = platforms.Format(platforms.Normalize(platforms.DefaultSpec())) + + inv.Parameters.Frontend = c.Frontend + inv.Parameters.Args = c.Args + + for _, s := range c.Secrets { + inv.Parameters.Secrets = append(inv.Parameters.Secrets, &Secret{ + ID: s.ID, + Optional: s.Optional, + }) + } + for _, s := range c.SSH { + inv.Parameters.SSH = append(inv.Parameters.SSH, &SSH{ + ID: s.ID, + Optional: s.Optional, + }) + } + for _, s := range c.Sources.Local { + inv.Parameters.Locals = append(inv.Parameters.Locals, &LocalSource{ + Name: s.Name, + }) + } + + incompleteMaterials := c.IncompleteMaterials + if !incompleteMaterials { + if len(c.Sources.Local) > 0 { + incompleteMaterials = true + } + } + + pr := &ProvenancePredicate{ + Invocation: inv, + ProvenancePredicate: slsa.ProvenancePredicate{ + BuildType: BuildKitBuildType, + Materials: materials, + }, + Metadata: &ProvenanceMetadata{ + Completeness: ProvenanceComplete{ + ProvenanceComplete: slsa.ProvenanceComplete{ + Parameters: c.Frontend != "", + Environment: true, + Materials: !incompleteMaterials, + }, + Hermetic: !incompleteMaterials && !c.NetworkAccess, + }, + }, + } + + if len(vcs) > 0 { + pr.Metadata.BuildKitMetadata.VCS = vcs + } + + return pr, nil +} + +func FilterArgs(m map[string]string) map[string]string { + var hostSpecificArgs = map[string]struct{}{ + "cgroup-parent": {}, + "image-resolve-mode": {}, + "platform": {}, + } + out := make(map[string]string) + for k, v := range m { + if _, ok := hostSpecificArgs[k]; ok { + continue + } + if strings.HasPrefix(k, "attest:") { + continue + } + out[k] = v + } + return out +} diff --git a/solver/llbsolver/result.go b/solver/llbsolver/result.go index 0cadda547d54..7cd08754ae84 100644 --- a/solver/llbsolver/result.go +++ b/solver/llbsolver/result.go @@ -1,85 +1,20 @@ package llbsolver import ( - "bytes" "context" - "path" cacheconfig 
"github.com/moby/buildkit/cache/config" - "github.com/moby/buildkit/cache/contenthash" + "github.com/moby/buildkit/frontend" "github.com/moby/buildkit/session" "github.com/moby/buildkit/solver" + "github.com/moby/buildkit/solver/llbsolver/provenance" "github.com/moby/buildkit/worker" - digest "github.com/opencontainers/go-digest" "github.com/pkg/errors" - "golang.org/x/sync/errgroup" ) -type Selector struct { - Path string - Wildcard bool - FollowLinks bool - IncludePatterns []string - ExcludePatterns []string -} - -func (sel Selector) HasWildcardOrFilters() bool { - return sel.Wildcard || len(sel.IncludePatterns) != 0 || len(sel.ExcludePatterns) != 0 -} - -func UnlazyResultFunc(ctx context.Context, res solver.Result, g session.Group) error { - ref, ok := res.Sys().(*worker.WorkerRef) - if !ok { - return errors.Errorf("invalid reference: %T", res) - } - if ref.ImmutableRef == nil { - return nil - } - return ref.ImmutableRef.Extract(ctx, g) -} - -func NewContentHashFunc(selectors []Selector) solver.ResultBasedCacheFunc { - return func(ctx context.Context, res solver.Result, s session.Group) (digest.Digest, error) { - ref, ok := res.Sys().(*worker.WorkerRef) - if !ok { - return "", errors.Errorf("invalid reference: %T", res) - } - - if len(selectors) == 0 { - selectors = []Selector{{}} - } - - dgsts := make([][]byte, len(selectors)) - - eg, ctx := errgroup.WithContext(ctx) - - for i, sel := range selectors { - i, sel := i, sel - eg.Go(func() error { - dgst, err := contenthash.Checksum( - ctx, ref.ImmutableRef, path.Join("/", sel.Path), - contenthash.ChecksumOpts{ - Wildcard: sel.Wildcard, - FollowLinks: sel.FollowLinks, - IncludePatterns: sel.IncludePatterns, - ExcludePatterns: sel.ExcludePatterns, - }, - s, - ) - if err != nil { - return errors.Wrapf(err, "failed to calculate checksum of ref %s", ref.ID()) - } - dgsts[i] = []byte(dgst) - return nil - }) - } - - if err := eg.Wait(); err != nil { - return "", err - } - - return digest.FromBytes(bytes.Join(dgsts, 
[]byte{0})), nil - } +type Result struct { + *frontend.Result + Provenance *provenance.Result } func workerRefResolver(refCfg cacheconfig.RefConfig, all bool, g session.Group) func(ctx context.Context, res solver.Result) ([]*solver.Remote, error) { diff --git a/solver/llbsolver/solver.go b/solver/llbsolver/solver.go index e4d9ff4a56eb..56d5622a212c 100644 --- a/solver/llbsolver/solver.go +++ b/solver/llbsolver/solver.go @@ -19,6 +19,7 @@ import ( "github.com/moby/buildkit/identity" "github.com/moby/buildkit/session" "github.com/moby/buildkit/solver" + "github.com/moby/buildkit/solver/llbsolver/provenance" "github.com/moby/buildkit/solver/result" "github.com/moby/buildkit/util/buildinfo" "github.com/moby/buildkit/util/compression" @@ -70,7 +71,7 @@ type Solver struct { // Processor defines a processing function to be applied after solving, but // before exporting -type Processor func(ctx context.Context, result *frontend.Result, s *Solver, j *solver.Job) (*frontend.Result, error) +type Processor func(ctx context.Context, result *Result, s *Solver, j *solver.Job) (*Result, error) func New(opt Opt) (*Solver, error) { s := &Solver{ @@ -101,8 +102,8 @@ func (s *Solver) resolver() solver.ResolveOpFunc { } } -func (s *Solver) Bridge(b solver.Builder) frontend.FrontendLLBBridge { - return &llbBridge{ +func (s *Solver) bridge(b solver.Builder) *provenanceBridge { + return &provenanceBridge{llbBridge: &llbBridge{ builder: b, frontends: s.frontends, resolveWorker: s.resolveWorker, @@ -110,7 +111,11 @@ func (s *Solver) Bridge(b solver.Builder) frontend.FrontendLLBBridge { resolveCacheImporterFuncs: s.resolveCacheImporterFuncs, cms: map[string]solver.CacheManager{}, sm: s.sm, - } + }} +} + +func (s *Solver) Bridge(b solver.Builder) frontend.FrontendLLBBridge { + return s.bridge(b) } func (s *Solver) Solve(ctx context.Context, id string, sessionID string, req frontend.SolveRequest, exp ExporterRequest, ent []entitlements.Entitlement, post []Processor) (*client.SolveResponse, 
error) { @@ -130,8 +135,9 @@ func (s *Solver) Solve(ctx context.Context, id string, sessionID string, req fro j.SessionID = sessionID var res *frontend.Result + br := s.bridge(j) if s.gatewayForwarder != nil && req.Definition == nil && req.Frontend == "" { - fwd := gateway.NewBridgeForwarder(ctx, s.Bridge(j), s.workerController, req.FrontendInputs, sessionID, s.sm) + fwd := gateway.NewBridgeForwarder(ctx, br, s.workerController, req.FrontendInputs, sessionID, s.sm) defer fwd.Discard() if err := s.gatewayForwarder.RegisterBuild(ctx, id, fwd); err != nil { return nil, err @@ -149,7 +155,7 @@ func (s *Solver) Solve(ctx context.Context, id string, sessionID string, req fro return nil, err } } else { - res, err = s.Bridge(j).Solve(ctx, req, sessionID) + res, err = br.Solve(ctx, req, sessionID) if err != nil { return nil, err } @@ -178,35 +184,19 @@ func (s *Solver) Solve(ctx context.Context, id string, sessionID string, req fro return nil, err } - if r := res.Ref; r != nil { - dtbi, err := buildinfo.Encode(ctx, res.Metadata, exptypes.ExporterBuildInfo, r.BuildSources()) - if err != nil { - return nil, err - } - if len(dtbi) > 0 { - res.AddMeta(exptypes.ExporterBuildInfo, dtbi) - } - } - for k, r := range res.Refs { - if r == nil { - continue - } - dtbi, err := buildinfo.Encode(ctx, res.Metadata, fmt.Sprintf("%s/%s", exptypes.ExporterBuildInfo, k), r.BuildSources()) - if err != nil { - return nil, err - } - if len(dtbi) > 0 { - res.AddMeta(fmt.Sprintf("%s/%s", exptypes.ExporterBuildInfo, k), dtbi) - } + resProv, err := addProvenanceToResult(res, br) + if err != nil { + return nil, err } for _, post := range post { - res2, err := post(ctx, res, s, j) + res2, err := post(ctx, resProv, s, j) if err != nil { return nil, err } - res = res2 + resProv = res2 } + res = resProv.Result cached, err := result.ConvertResult(res, func(res solver.ResultProxy) (solver.CachedResult, error) { return res.Result(ctx) @@ -361,6 +351,120 @@ func splitCacheExporters(exporters 
[]RemoteCacheExporter) (rest []RemoteCacheExp return rest, inline } +func addProvenanceToResult(res *frontend.Result, br *provenanceBridge) (*Result, error) { + if res == nil { + return nil, nil + } + reqs, err := br.requests(res) + if err != nil { + return nil, err + } + out := &Result{ + Result: res, + Provenance: &provenance.Result{}, + } + if res.Ref != nil { + cp, err := getProvenance(res.Ref, reqs.ref.bridge, "", reqs) + if err != nil { + return nil, err + } + out.Provenance.Ref = cp + if res.Metadata == nil { + res.Metadata = map[string][]byte{} + } + if err := buildinfo.AddMetadata(res.Metadata, exptypes.ExporterBuildInfo, cp); err != nil { + return nil, err + } + } + if len(res.Refs) == 0 { + return out, nil + } + out.Provenance.Refs = make(map[string]*provenance.Capture, len(res.Refs)) + + for k, ref := range res.Refs { + cp, err := getProvenance(ref, reqs.refs[k].bridge, k, reqs) + if err != nil { + return nil, err + } + out.Provenance.Refs[k] = cp + if res.Metadata == nil { + res.Metadata = map[string][]byte{} + } + if err := buildinfo.AddMetadata(res.Metadata, fmt.Sprintf("%s/%s", exptypes.ExporterBuildInfo, k), cp); err != nil { + return nil, err + } + } + return out, nil +} + +func getRefProvenance(ref solver.ResultProxy, br *provenanceBridge) (*provenance.Capture, error) { + if ref == nil { + return nil, nil + } + p := ref.Provenance() + if p == nil { + return nil, errors.Errorf("missing provenance for %s", ref.ID()) + } + pr, ok := p.(*provenance.Capture) + if !ok { + return nil, errors.Errorf("invalid provenance type %T", p) + } + + if br.req != nil { + pr.Frontend = br.req.Frontend + pr.Args = provenance.FilterArgs(br.req.FrontendOpt) + // TODO: should also save some output options like compression + + if len(br.req.FrontendInputs) > 0 { + pr.IncompleteMaterials = true // not implemented + } + } + + return pr, nil +} + +func getProvenance(ref solver.ResultProxy, br *provenanceBridge, id string, reqs *resultRequests) (*provenance.Capture, error) { 
+ pr, err := getRefProvenance(ref, br) + if err != nil { + return nil, err + } + if pr == nil { + return nil, nil + } + + visited := reqs.allRes() + visited[ref.ID()] = struct{}{} + // provenance for all the refs not directly in the result needs to be captured as well + if err := br.eachRef(func(r solver.ResultProxy) error { + if _, ok := visited[r.ID()]; ok { + return nil + } + visited[r.ID()] = struct{}{} + pr2, err := getRefProvenance(r, br) + if err != nil { + return err + } + return pr.Merge(pr2) + }); err != nil { + return nil, err + } + + imgs := br.allImages() + if id != "" { + imgs = reqs.filterImagePlatforms(id, imgs) + } + for _, img := range imgs { + pr.AddImage(img) + } + + if err := pr.OptimizeImageSources(); err != nil { + return nil, err + } + pr.Sort() + + return pr, nil +} + type inlineCacheExporter interface { ExportForLayers(context.Context, []digest.Digest) ([]byte, error) } diff --git a/solver/llbsolver/vertex.go b/solver/llbsolver/vertex.go index 4f36c2eddbb3..0be9a5e8940c 100644 --- a/solver/llbsolver/vertex.go +++ b/solver/llbsolver/vertex.go @@ -6,6 +6,7 @@ import ( "github.com/containerd/containerd/platforms" "github.com/moby/buildkit/solver" + "github.com/moby/buildkit/solver/llbsolver/ops/opsutils" "github.com/moby/buildkit/solver/pb" "github.com/moby/buildkit/source" "github.com/moby/buildkit/util/entitlements" @@ -228,7 +229,7 @@ func loadLLB(def *pb.Definition, fn func(digest.Digest, *pb.Op, func(digest.Dige return nil, errors.Errorf("invalid missing input digest %s", dgst) } - if err := ValidateOp(op); err != nil { + if err := opsutils.Validate(op); err != nil { return nil, err } @@ -301,63 +302,6 @@ func llbOpName(pbOp *pb.Op, load func(digest.Digest) (solver.Vertex, error)) (st } } -func ValidateOp(op *pb.Op) error { - if op == nil { - return errors.Errorf("invalid nil op") - } - - switch op := op.Op.(type) { - case *pb.Op_Source: - if op.Source == nil { - return errors.Errorf("invalid nil source op") - } - case *pb.Op_Exec: - if 
op.Exec == nil { - return errors.Errorf("invalid nil exec op") - } - if op.Exec.Meta == nil { - return errors.Errorf("invalid exec op with no meta") - } - if len(op.Exec.Meta.Args) == 0 { - return errors.Errorf("invalid exec op with no args") - } - if len(op.Exec.Mounts) == 0 { - return errors.Errorf("invalid exec op with no mounts") - } - - isRoot := false - for _, m := range op.Exec.Mounts { - if m.Dest == pb.RootMount { - isRoot = true - break - } - } - if !isRoot { - return errors.Errorf("invalid exec op with no rootfs") - } - case *pb.Op_File: - if op.File == nil { - return errors.Errorf("invalid nil file op") - } - if len(op.File.Actions) == 0 { - return errors.Errorf("invalid file op with no actions") - } - case *pb.Op_Build: - if op.Build == nil { - return errors.Errorf("invalid nil build op") - } - case *pb.Op_Merge: - if op.Merge == nil { - return errors.Errorf("invalid nil merge op") - } - case *pb.Op_Diff: - if op.Diff == nil { - return errors.Errorf("invalid nil diff op") - } - } - return nil -} - func fileOpName(actions []*pb.FileAction) string { names := make([]string, 0, len(actions)) for _, action := range actions { diff --git a/solver/scheduler_test.go b/solver/scheduler_test.go index 6917baa81f7d..2e4c602bba5d 100644 --- a/solver/scheduler_test.go +++ b/solver/scheduler_test.go @@ -54,11 +54,10 @@ func TestSingleLevelActiveGraph(t *testing.T) { } g0.Vertex.(*vertex).setupCallCounters() - res, bi, err := j0.Build(ctx, g0) + res, err := j0.Build(ctx, g0) require.NoError(t, err) require.NotNil(t, res) require.Equal(t, unwrap(res), "result0") - require.Equal(t, len(bi), 0) require.Equal(t, *g0.Vertex.(*vertex).cacheCallCount, int64(1)) require.Equal(t, *g0.Vertex.(*vertex).execCallCount, int64(1)) @@ -81,10 +80,9 @@ func TestSingleLevelActiveGraph(t *testing.T) { } g1.Vertex.(*vertex).setupCallCounters() - res, bi, err = j1.Build(ctx, g1) + res, err = j1.Build(ctx, g1) require.NoError(t, err) require.Equal(t, unwrap(res), "result0") - 
require.Equal(t, len(bi), 0) require.Equal(t, *g0.Vertex.(*vertex).cacheCallCount, int64(1)) require.Equal(t, *g0.Vertex.(*vertex).execCallCount, int64(1)) @@ -113,10 +111,9 @@ func TestSingleLevelActiveGraph(t *testing.T) { } g2.Vertex.(*vertex).setupCallCounters() - res, bi, err = j2.Build(ctx, g2) + res, err = j2.Build(ctx, g2) require.NoError(t, err) require.Equal(t, unwrap(res), "result0") - require.Equal(t, len(bi), 0) require.Equal(t, *g0.Vertex.(*vertex).cacheCallCount, int64(1)) require.Equal(t, *g0.Vertex.(*vertex).execCallCount, int64(1)) @@ -149,10 +146,9 @@ func TestSingleLevelActiveGraph(t *testing.T) { } g3.Vertex.(*vertex).setupCallCounters() - res, bi, err = j3.Build(ctx, g3) + res, err = j3.Build(ctx, g3) require.NoError(t, err) require.Equal(t, unwrap(res), "result3") - require.Equal(t, len(bi), 0) require.Equal(t, *g3.Vertex.(*vertex).cacheCallCount, int64(1)) require.Equal(t, *g3.Vertex.(*vertex).execCallCount, int64(1)) @@ -192,18 +188,16 @@ func TestSingleLevelActiveGraph(t *testing.T) { eg, _ := errgroup.WithContext(ctx) eg.Go(func() error { - res, bi, err := j4.Build(ctx, g4) + res, err := j4.Build(ctx, g4) require.NoError(t, err) require.Equal(t, unwrap(res), "result4") - require.Equal(t, len(bi), 0) return err }) eg.Go(func() error { - res, bi, err := j5.Build(ctx, g4) + res, err := j5.Build(ctx, g4) require.NoError(t, err) require.Equal(t, unwrap(res), "result4") - require.Equal(t, len(bi), 0) return err }) @@ -240,10 +234,9 @@ func TestSingleLevelCache(t *testing.T) { } g0.Vertex.(*vertex).setupCallCounters() - res, bi, err := j0.Build(ctx, g0) + res, err := j0.Build(ctx, g0) require.NoError(t, err) require.Equal(t, unwrap(res), "result0") - require.Equal(t, len(bi), 0) require.NoError(t, j0.Discard()) j0 = nil @@ -267,10 +260,9 @@ func TestSingleLevelCache(t *testing.T) { } g1.Vertex.(*vertex).setupCallCounters() - res, bi, err = j1.Build(ctx, g1) + res, err = j1.Build(ctx, g1) require.NoError(t, err) require.Equal(t, unwrap(res), 
"result1") - require.Equal(t, len(bi), 0) require.Equal(t, *g1.Vertex.(*vertex).cacheCallCount, int64(1)) require.Equal(t, *g1.Vertex.(*vertex).execCallCount, int64(1)) @@ -298,10 +290,9 @@ func TestSingleLevelCache(t *testing.T) { } g2.Vertex.(*vertex).setupCallCounters() - res, bi, err = j2.Build(ctx, g2) + res, err = j2.Build(ctx, g2) require.NoError(t, err) require.Equal(t, unwrap(res), "result0") - require.Equal(t, len(bi), 0) require.Equal(t, *g0.Vertex.(*vertex).cacheCallCount, int64(1)) require.Equal(t, *g0.Vertex.(*vertex).execCallCount, int64(1)) @@ -366,18 +357,16 @@ func TestSingleLevelCacheParallel(t *testing.T) { eg, _ := errgroup.WithContext(ctx) eg.Go(func() error { - res, bi, err := j0.Build(ctx, g0) + res, err := j0.Build(ctx, g0) require.NoError(t, err) require.Equal(t, unwrap(res), "result0") - require.Equal(t, len(bi), 0) return err }) eg.Go(func() error { - res, bi, err := j1.Build(ctx, g1) + res, err := j1.Build(ctx, g1) require.NoError(t, err) require.Equal(t, unwrap(res), "result0") - require.Equal(t, len(bi), 0) return err }) @@ -460,18 +449,16 @@ func TestMultiLevelCacheParallel(t *testing.T) { eg, _ := errgroup.WithContext(ctx) eg.Go(func() error { - res, bi, err := j0.Build(ctx, g0) + res, err := j0.Build(ctx, g0) require.NoError(t, err) require.Equal(t, unwrap(res), "result0") - require.Equal(t, len(bi), 0) return err }) eg.Go(func() error { - res, bi, err := j1.Build(ctx, g1) + res, err := j1.Build(ctx, g1) require.NoError(t, err) require.Equal(t, unwrap(res), "result0") - require.Equal(t, len(bi), 0) return err }) @@ -514,7 +501,7 @@ func TestSingleCancelCache(t *testing.T) { } g0.Vertex.(*vertex).setupCallCounters() - _, _, err = j0.Build(ctx, g0) + _, err = j0.Build(ctx, g0) require.Error(t, err) require.Equal(t, true, errors.Is(err, context.Canceled)) @@ -556,7 +543,7 @@ func TestSingleCancelExec(t *testing.T) { } g1.Vertex.(*vertex).setupCallCounters() - _, _, err = j1.Build(ctx, g1) + _, err = j1.Build(ctx, g1) require.Error(t, 
err) require.Equal(t, true, errors.Is(err, context.Canceled)) @@ -609,7 +596,7 @@ func TestSingleCancelParallel(t *testing.T) { }), } - _, _, err = j.Build(ctx, g) + _, err = j.Build(ctx, g) close(firstErrored) require.Error(t, err) require.Equal(t, true, errors.Is(err, context.Canceled)) @@ -633,10 +620,9 @@ func TestSingleCancelParallel(t *testing.T) { } <-firstReady - res, bi, err := j.Build(ctx, g) + res, err := j.Build(ctx, g) require.NoError(t, err) require.Equal(t, unwrap(res), "result2") - require.Equal(t, len(bi), 0) return err }) @@ -683,10 +669,9 @@ func TestMultiLevelCalculation(t *testing.T) { }), } - res, bi, err := j0.Build(ctx, g) + res, err := j0.Build(ctx, g) require.NoError(t, err) require.Equal(t, unwrapInt(res), 42) // 1 + 2*(7 + 2) + 2 + 2 + 19 - require.Equal(t, len(bi), 0) require.NoError(t, j0.Discard()) j0 = nil @@ -722,10 +707,9 @@ func TestMultiLevelCalculation(t *testing.T) { }, }), } - res, bi, err = j1.Build(ctx, g2) + res, err = j1.Build(ctx, g2) require.NoError(t, err) require.Equal(t, unwrapInt(res), 42) - require.Equal(t, len(bi), 0) } func TestHugeGraph(t *testing.T) { @@ -757,10 +741,9 @@ func TestHugeGraph(t *testing.T) { // printGraph(g, "") g.Vertex.(*vertexSum).setupCallCounters() - res, bi, err := j0.Build(ctx, g) + res, err := j0.Build(ctx, g) require.NoError(t, err) require.Equal(t, unwrapInt(res), v) - require.Equal(t, len(bi), 0) require.Equal(t, int64(nodes), *g.Vertex.(*vertexSum).cacheCallCount) // execCount := *g.Vertex.(*vertexSum).execCallCount // require.True(t, execCount < 1000) @@ -780,10 +763,9 @@ func TestHugeGraph(t *testing.T) { }() g.Vertex.(*vertexSum).setupCallCounters() - res, bi, err = j1.Build(ctx, g) + res, err = j1.Build(ctx, g) require.NoError(t, err) require.Equal(t, unwrapInt(res), v) - require.Equal(t, len(bi), 0) require.Equal(t, int64(nodes), *g.Vertex.(*vertexSum).cacheCallCount) require.Equal(t, int64(0), *g.Vertex.(*vertexSum).execCallCount) @@ -837,10 +819,9 @@ func 
TestOptimizedCacheAccess(t *testing.T) { } g0.Vertex.(*vertex).setupCallCounters() - res, bi, err := j0.Build(ctx, g0) + res, err := j0.Build(ctx, g0) require.NoError(t, err) require.Equal(t, unwrap(res), "result0") - require.Equal(t, len(bi), 0) require.Equal(t, int64(3), *g0.Vertex.(*vertex).cacheCallCount) require.Equal(t, int64(3), *g0.Vertex.(*vertex).execCallCount) @@ -884,10 +865,9 @@ func TestOptimizedCacheAccess(t *testing.T) { } g1.Vertex.(*vertex).setupCallCounters() - res, bi, err = j1.Build(ctx, g1) + res, err = j1.Build(ctx, g1) require.NoError(t, err) require.Equal(t, unwrap(res), "result0") - require.Equal(t, len(bi), 0) require.Equal(t, int64(3), *g1.Vertex.(*vertex).cacheCallCount) require.Equal(t, int64(1), *g1.Vertex.(*vertex).execCallCount) @@ -947,10 +927,9 @@ func TestOptimizedCacheAccess2(t *testing.T) { } g0.Vertex.(*vertex).setupCallCounters() - res, bi, err := j0.Build(ctx, g0) + res, err := j0.Build(ctx, g0) require.NoError(t, err) require.Equal(t, unwrap(res), "result0") - require.Equal(t, len(bi), 0) require.Equal(t, int64(3), *g0.Vertex.(*vertex).cacheCallCount) require.Equal(t, int64(3), *g0.Vertex.(*vertex).execCallCount) @@ -995,10 +974,9 @@ func TestOptimizedCacheAccess2(t *testing.T) { } g1.Vertex.(*vertex).setupCallCounters() - res, bi, err = j1.Build(ctx, g1) + res, err = j1.Build(ctx, g1) require.NoError(t, err) require.Equal(t, unwrap(res), "result0") - require.Equal(t, len(bi), 0) require.Equal(t, int64(3), *g1.Vertex.(*vertex).cacheCallCount) require.Equal(t, int64(1), *g1.Vertex.(*vertex).execCallCount) @@ -1042,10 +1020,9 @@ func TestOptimizedCacheAccess2(t *testing.T) { } g2.Vertex.(*vertex).setupCallCounters() - res, bi, err = j2.Build(ctx, g2) + res, err = j2.Build(ctx, g2) require.NoError(t, err) require.Equal(t, unwrap(res), "result0") - require.Equal(t, len(bi), 0) require.Equal(t, int64(3), *g2.Vertex.(*vertex).cacheCallCount) require.Equal(t, int64(2), *g2.Vertex.(*vertex).execCallCount) @@ -1093,10 +1070,9 @@ 
func TestSlowCache(t *testing.T) { }), } - res, bi, err := j0.Build(ctx, g0) + res, err := j0.Build(ctx, g0) require.NoError(t, err) require.Equal(t, unwrap(res), "result0") - require.Equal(t, len(bi), 0) require.NoError(t, j0.Discard()) j0 = nil @@ -1128,10 +1104,9 @@ func TestSlowCache(t *testing.T) { }), } - res, bi, err = j1.Build(ctx, g1) + res, err = j1.Build(ctx, g1) require.NoError(t, err) require.Equal(t, unwrap(res), "result0") - require.Equal(t, len(bi), 0) require.NoError(t, j1.Discard()) j1 = nil @@ -1186,11 +1161,9 @@ func TestParallelInputs(t *testing.T) { } g0.Vertex.(*vertex).setupCallCounters() - res, bi, err := j0.Build(ctx, g0) + res, err := j0.Build(ctx, g0) require.NoError(t, err) require.Equal(t, unwrap(res), "result0") - require.Equal(t, len(bi), 0) - require.NoError(t, j0.Discard()) j0 = nil @@ -1241,7 +1214,7 @@ func TestErrorReturns(t *testing.T) { }), } - _, _, err = j0.Build(ctx, g0) + _, err = j0.Build(ctx, g0) require.Error(t, err) require.Contains(t, err.Error(), "error-from-test") @@ -1282,7 +1255,7 @@ func TestErrorReturns(t *testing.T) { }), } - _, _, err = j1.Build(ctx, g1) + _, err = j1.Build(ctx, g1) require.Error(t, err) require.Equal(t, true, errors.Is(err, context.Canceled)) @@ -1323,7 +1296,7 @@ func TestErrorReturns(t *testing.T) { }), } - _, _, err = j2.Build(ctx, g2) + _, err = j2.Build(ctx, g2) require.Error(t, err) require.Contains(t, err.Error(), "exec-error-from-test") @@ -1367,10 +1340,9 @@ func TestMultipleCacheSources(t *testing.T) { }), } - res, bi, err := j0.Build(ctx, g0) + res, err := j0.Build(ctx, g0) require.NoError(t, err) require.Equal(t, unwrap(res), "result0") - require.Equal(t, len(bi), 0) require.Equal(t, int64(0), cacheManager.loadCounter) require.NoError(t, j0.Discard()) @@ -1410,10 +1382,10 @@ func TestMultipleCacheSources(t *testing.T) { }), } - res, bi, err = j1.Build(ctx, g1) + res, err = j1.Build(ctx, g1) require.NoError(t, err) require.Equal(t, unwrap(res), "result0") - require.Equal(t, 
len(bi), 0) + require.Equal(t, int64(1), cacheManager.loadCounter) require.Equal(t, int64(0), cacheManager2.loadCounter) @@ -1439,10 +1411,10 @@ func TestMultipleCacheSources(t *testing.T) { }), } - res, bi, err = j1.Build(ctx, g2) + res, err = j1.Build(ctx, g2) require.NoError(t, err) require.Equal(t, unwrap(res), "result2") - require.Equal(t, len(bi), 0) + require.Equal(t, int64(2), cacheManager.loadCounter) require.Equal(t, int64(0), cacheManager2.loadCounter) @@ -1484,10 +1456,10 @@ func TestRepeatBuildWithIgnoreCache(t *testing.T) { } g0.Vertex.(*vertex).setupCallCounters() - res, bi, err := j0.Build(ctx, g0) + res, err := j0.Build(ctx, g0) require.NoError(t, err) require.Equal(t, unwrap(res), "result0") - require.Equal(t, len(bi), 0) + require.Equal(t, int64(2), *g0.Vertex.(*vertex).cacheCallCount) require.Equal(t, int64(2), *g0.Vertex.(*vertex).execCallCount) @@ -1523,10 +1495,10 @@ func TestRepeatBuildWithIgnoreCache(t *testing.T) { } g1.Vertex.(*vertex).setupCallCounters() - res, bi, err = j1.Build(ctx, g1) + res, err = j1.Build(ctx, g1) require.NoError(t, err) require.Equal(t, unwrap(res), "result0-1") - require.Equal(t, len(bi), 0) + require.Equal(t, int64(2), *g1.Vertex.(*vertex).cacheCallCount) require.Equal(t, int64(2), *g1.Vertex.(*vertex).execCallCount) @@ -1561,10 +1533,10 @@ func TestRepeatBuildWithIgnoreCache(t *testing.T) { } g2.Vertex.(*vertex).setupCallCounters() - res, bi, err = j2.Build(ctx, g2) + res, err = j2.Build(ctx, g2) require.NoError(t, err) require.Equal(t, unwrap(res), "result0-2") - require.Equal(t, len(bi), 0) + require.Equal(t, int64(2), *g2.Vertex.(*vertex).cacheCallCount) require.Equal(t, int64(2), *g2.Vertex.(*vertex).execCallCount) @@ -1611,10 +1583,10 @@ func TestIgnoreCacheResumeFromSlowCache(t *testing.T) { } g0.Vertex.(*vertex).setupCallCounters() - res, bi, err := j0.Build(ctx, g0) + res, err := j0.Build(ctx, g0) require.NoError(t, err) require.Equal(t, unwrap(res), "result0") - require.Equal(t, len(bi), 0) + 
require.Equal(t, int64(2), *g0.Vertex.(*vertex).cacheCallCount) require.Equal(t, int64(2), *g0.Vertex.(*vertex).execCallCount) @@ -1652,10 +1624,10 @@ func TestIgnoreCacheResumeFromSlowCache(t *testing.T) { } g1.Vertex.(*vertex).setupCallCounters() - res, bi, err = j1.Build(ctx, g1) + res, err = j1.Build(ctx, g1) require.NoError(t, err) require.Equal(t, unwrap(res), "result0") - require.Equal(t, len(bi), 0) + require.Equal(t, int64(2), *g1.Vertex.(*vertex).cacheCallCount) require.Equal(t, int64(1), *g1.Vertex.(*vertex).execCallCount) @@ -1690,10 +1662,9 @@ func TestParallelBuildsIgnoreCache(t *testing.T) { } g0.Vertex.(*vertex).setupCallCounters() - res, bi, err := j0.Build(ctx, g0) + res, err := j0.Build(ctx, g0) require.NoError(t, err) require.Equal(t, unwrap(res), "result0") - require.Equal(t, len(bi), 0) // match by vertex digest j1, err := l.NewJob("j1") @@ -1715,10 +1686,9 @@ func TestParallelBuildsIgnoreCache(t *testing.T) { } g1.Vertex.(*vertex).setupCallCounters() - res, bi, err = j1.Build(ctx, g1) + res, err = j1.Build(ctx, g1) require.NoError(t, err) require.Equal(t, unwrap(res), "result1") - require.Equal(t, len(bi), 0) require.NoError(t, j0.Discard()) j0 = nil @@ -1744,10 +1714,9 @@ func TestParallelBuildsIgnoreCache(t *testing.T) { } g2.Vertex.(*vertex).setupCallCounters() - res, bi, err = j2.Build(ctx, g2) + res, err = j2.Build(ctx, g2) require.NoError(t, err) require.Equal(t, unwrap(res), "result2") - require.Equal(t, len(bi), 0) // match by cache key j3, err := l.NewJob("j3") @@ -1769,10 +1738,9 @@ func TestParallelBuildsIgnoreCache(t *testing.T) { } g3.Vertex.(*vertex).setupCallCounters() - res, bi, err = j3.Build(ctx, g3) + res, err = j3.Build(ctx, g3) require.NoError(t, err) require.Equal(t, unwrap(res), "result3") - require.Equal(t, len(bi), 0) // add another ignorecache merges now @@ -1795,10 +1763,9 @@ func TestParallelBuildsIgnoreCache(t *testing.T) { } g4.Vertex.(*vertex).setupCallCounters() - res, bi, err = j4.Build(ctx, g4) + res, err = 
j4.Build(ctx, g4) require.NoError(t, err) require.Equal(t, unwrap(res), "result3") - require.Equal(t, len(bi), 0) // add another !ignorecache merges now @@ -1820,10 +1787,9 @@ func TestParallelBuildsIgnoreCache(t *testing.T) { } g5.Vertex.(*vertex).setupCallCounters() - res, bi, err = j5.Build(ctx, g5) + res, err = j5.Build(ctx, g5) require.NoError(t, err) require.Equal(t, unwrap(res), "result3") - require.Equal(t, len(bi), 0) } func TestSubbuild(t *testing.T) { @@ -1855,10 +1821,9 @@ func TestSubbuild(t *testing.T) { } g0.Vertex.(*vertexSum).setupCallCounters() - res, bi, err := j0.Build(ctx, g0) + res, err := j0.Build(ctx, g0) require.NoError(t, err) require.Equal(t, unwrapInt(res), 8) - require.Equal(t, len(bi), 0) require.Equal(t, int64(2), *g0.Vertex.(*vertexSum).cacheCallCount) require.Equal(t, int64(2), *g0.Vertex.(*vertexSum).execCallCount) @@ -1877,10 +1842,9 @@ func TestSubbuild(t *testing.T) { g0.Vertex.(*vertexSum).setupCallCounters() - res, bi, err = j1.Build(ctx, g0) + res, err = j1.Build(ctx, g0) require.NoError(t, err) require.Equal(t, unwrapInt(res), 8) - require.Equal(t, len(bi), 0) require.Equal(t, int64(2), *g0.Vertex.(*vertexSum).cacheCallCount) require.Equal(t, int64(0), *g0.Vertex.(*vertexSum).execCallCount) @@ -1929,10 +1893,9 @@ func TestCacheWithSelector(t *testing.T) { } g0.Vertex.(*vertex).setupCallCounters() - res, bi, err := j0.Build(ctx, g0) + res, err := j0.Build(ctx, g0) require.NoError(t, err) require.Equal(t, unwrap(res), "result0") - require.Equal(t, len(bi), 0) require.Equal(t, int64(2), *g0.Vertex.(*vertex).cacheCallCount) require.Equal(t, int64(2), *g0.Vertex.(*vertex).execCallCount) @@ -1971,10 +1934,9 @@ func TestCacheWithSelector(t *testing.T) { } g1.Vertex.(*vertex).setupCallCounters() - res, bi, err = j1.Build(ctx, g1) + res, err = j1.Build(ctx, g1) require.NoError(t, err) require.Equal(t, unwrap(res), "result0") - require.Equal(t, len(bi), 0) require.Equal(t, int64(2), *g1.Vertex.(*vertex).cacheCallCount) 
require.Equal(t, int64(0), *g1.Vertex.(*vertex).execCallCount) @@ -2013,10 +1975,9 @@ func TestCacheWithSelector(t *testing.T) { } g2.Vertex.(*vertex).setupCallCounters() - res, bi, err = j2.Build(ctx, g2) + res, err = j2.Build(ctx, g2) require.NoError(t, err) require.Equal(t, unwrap(res), "result0-1") - require.Equal(t, len(bi), 0) require.Equal(t, int64(2), *g2.Vertex.(*vertex).cacheCallCount) require.Equal(t, int64(1), *g2.Vertex.(*vertex).execCallCount) @@ -2069,10 +2030,9 @@ func TestCacheSlowWithSelector(t *testing.T) { } g0.Vertex.(*vertex).setupCallCounters() - res, bi, err := j0.Build(ctx, g0) + res, err := j0.Build(ctx, g0) require.NoError(t, err) require.Equal(t, unwrap(res), "result0") - require.Equal(t, len(bi), 0) require.Equal(t, int64(2), *g0.Vertex.(*vertex).cacheCallCount) require.Equal(t, int64(2), *g0.Vertex.(*vertex).execCallCount) @@ -2114,10 +2074,9 @@ func TestCacheSlowWithSelector(t *testing.T) { } g1.Vertex.(*vertex).setupCallCounters() - res, bi, err = j1.Build(ctx, g1) + res, err = j1.Build(ctx, g1) require.NoError(t, err) require.Equal(t, unwrap(res), "result0") - require.Equal(t, len(bi), 0) require.Equal(t, int64(2), *g1.Vertex.(*vertex).cacheCallCount) require.Equal(t, int64(0), *g1.Vertex.(*vertex).execCallCount) @@ -2157,10 +2116,9 @@ func TestCacheExporting(t *testing.T) { }), } - res, bi, err := j0.Build(ctx, g0) + res, err := j0.Build(ctx, g0) require.NoError(t, err) require.Equal(t, unwrapInt(res), 6) - require.Equal(t, len(bi), 0) require.NoError(t, j0.Discard()) j0 = nil @@ -2189,10 +2147,9 @@ func TestCacheExporting(t *testing.T) { } }() - res, bi, err = j1.Build(ctx, g0) + res, err = j1.Build(ctx, g0) require.NoError(t, err) require.Equal(t, unwrapInt(res), 6) - require.Equal(t, len(bi), 0) require.NoError(t, j1.Discard()) j1 = nil @@ -2247,10 +2204,9 @@ func TestCacheExportingModeMin(t *testing.T) { }), } - res, bi, err := j0.Build(ctx, g0) + res, err := j0.Build(ctx, g0) require.NoError(t, err) require.Equal(t, 
unwrapInt(res), 11) - require.Equal(t, len(bi), 0) require.NoError(t, j0.Discard()) j0 = nil @@ -2281,10 +2237,9 @@ func TestCacheExportingModeMin(t *testing.T) { } }() - res, bi, err = j1.Build(ctx, g0) + res, err = j1.Build(ctx, g0) require.NoError(t, err) require.Equal(t, unwrapInt(res), 11) - require.Equal(t, len(bi), 0) require.NoError(t, j1.Discard()) j1 = nil @@ -2316,10 +2271,9 @@ func TestCacheExportingModeMin(t *testing.T) { } }() - res, bi, err = j2.Build(ctx, g0) + res, err = j2.Build(ctx, g0) require.NoError(t, err) require.Equal(t, unwrapInt(res), 11) - require.Equal(t, len(bi), 0) require.NoError(t, j2.Discard()) j2 = nil @@ -2399,10 +2353,10 @@ func TestSlowCacheAvoidAccess(t *testing.T) { } g0.Vertex.(*vertex).setupCallCounters() - res, bi, err := j0.Build(ctx, g0) + res, err := j0.Build(ctx, g0) require.NoError(t, err) require.Equal(t, unwrap(res), "result0") - require.Equal(t, len(bi), 0) + require.Equal(t, int64(0), cacheManager.loadCounter) require.NoError(t, j0.Discard()) @@ -2419,10 +2373,9 @@ func TestSlowCacheAvoidAccess(t *testing.T) { } }() - res, bi, err = j1.Build(ctx, g0) + res, err = j1.Build(ctx, g0) require.NoError(t, err) require.Equal(t, unwrap(res), "result0") - require.Equal(t, len(bi), 0) require.NoError(t, j1.Discard()) j1 = nil @@ -2502,10 +2455,10 @@ func TestSlowCacheAvoidLoadOnCache(t *testing.T) { } g0.Vertex.(*vertex).setupCallCounters() - res, bi, err := j0.Build(ctx, g0) + res, err := j0.Build(ctx, g0) require.NoError(t, err) require.Equal(t, unwrap(res), "resultmain") - require.Equal(t, len(bi), 0) + require.Equal(t, int64(0), cacheManager.loadCounter) require.NoError(t, j0.Discard()) @@ -2576,10 +2529,9 @@ func TestSlowCacheAvoidLoadOnCache(t *testing.T) { } }() - res, bi, err = j1.Build(ctx, g0) + res, err = j1.Build(ctx, g0) require.NoError(t, err) require.Equal(t, unwrap(res), "resultmain") - require.Equal(t, len(bi), 0) require.NoError(t, j1.Discard()) j1 = nil @@ -2621,10 +2573,9 @@ func TestCacheMultipleMaps(t 
*testing.T) { value: "result0", }), } - res, bi, err := j0.Build(ctx, g0) + res, err := j0.Build(ctx, g0) require.NoError(t, err) require.Equal(t, unwrap(res), "result0") - require.Equal(t, len(bi), 0) require.NoError(t, j0.Discard()) j0 = nil @@ -2658,10 +2609,9 @@ func TestCacheMultipleMaps(t *testing.T) { }), } - res, bi, err = j1.Build(ctx, g1) + res, err = j1.Build(ctx, g1) require.NoError(t, err) require.Equal(t, unwrap(res), "result0") - require.Equal(t, len(bi), 0) require.NoError(t, j1.Discard()) j1 = nil @@ -2694,10 +2644,9 @@ func TestCacheMultipleMaps(t *testing.T) { }), } - res, bi, err = j2.Build(ctx, g2) + res, err = j2.Build(ctx, g2) require.NoError(t, err) require.Equal(t, unwrap(res), "result0") - require.Equal(t, len(bi), 0) require.NoError(t, j2.Discard()) j2 = nil @@ -2749,10 +2698,9 @@ func TestCacheInputMultipleMaps(t *testing.T) { }}, }), } - res, bi, err := j0.Build(ctx, g0) + res, err := j0.Build(ctx, g0) require.NoError(t, err) require.Equal(t, unwrap(res), "result0") - require.Equal(t, len(bi), 0) expTarget := newTestExporterTarget() @@ -2791,10 +2739,9 @@ func TestCacheInputMultipleMaps(t *testing.T) { }}, }), } - res, bi, err = j1.Build(ctx, g1) + res, err = j1.Build(ctx, g1) require.NoError(t, err) require.Equal(t, unwrap(res), "result0") - require.Equal(t, len(bi), 0) _, err = res.CacheKeys()[0].Exporter.ExportTo(ctx, expTarget, testExporterOpts(true)) require.NoError(t, err) @@ -2848,10 +2795,9 @@ func TestCacheExportingPartialSelector(t *testing.T) { }), } - res, bi, err := j0.Build(ctx, g0) + res, err := j0.Build(ctx, g0) require.NoError(t, err) require.Equal(t, unwrap(res), "result0") - require.Equal(t, len(bi), 0) require.NoError(t, j0.Discard()) j0 = nil @@ -2882,10 +2828,9 @@ func TestCacheExportingPartialSelector(t *testing.T) { g1 := g0 - res, bi, err = j1.Build(ctx, g1) + res, err = j1.Build(ctx, g1) require.NoError(t, err) require.Equal(t, unwrap(res), "result0") - require.Equal(t, len(bi), 0) require.NoError(t, 
j1.Discard()) j1 = nil @@ -2937,10 +2882,9 @@ func TestCacheExportingPartialSelector(t *testing.T) { }), } - res, bi, err = j2.Build(ctx, g2) + res, err = j2.Build(ctx, g2) require.NoError(t, err) require.Equal(t, unwrap(res), "result0") - require.Equal(t, len(bi), 0) require.NoError(t, j2.Discard()) j2 = nil @@ -2984,10 +2928,9 @@ func TestCacheExportingPartialSelector(t *testing.T) { ), } - res, bi, err = j3.Build(ctx, g3) + res, err = j3.Build(ctx, g3) require.NoError(t, err) require.Equal(t, unwrap(res), "result2") - require.Equal(t, len(bi), 0) require.NoError(t, j3.Discard()) j3 = nil @@ -3083,10 +3026,9 @@ func TestCacheExportingMergedKey(t *testing.T) { }), } - res, bi, err := j0.Build(ctx, g0) + res, err := j0.Build(ctx, g0) require.NoError(t, err) require.Equal(t, unwrap(res), "result0") - require.Equal(t, len(bi), 0) require.NoError(t, j0.Discard()) j0 = nil @@ -3144,10 +3086,10 @@ func TestMergedEdgesLookup(t *testing.T) { } g.Vertex.(*vertexSum).setupCallCounters() - res, bi, err := j0.Build(ctx, g) + res, err := j0.Build(ctx, g) require.NoError(t, err) require.Equal(t, unwrapInt(res), 11) - require.Equal(t, len(bi), 0) + require.Equal(t, int64(7), *g.Vertex.(*vertexSum).cacheCallCount) require.Equal(t, int64(0), cacheManager.loadCounter) @@ -3196,13 +3138,12 @@ func TestCacheLoadError(t *testing.T) { } g.Vertex.(*vertexSum).setupCallCounters() - res, bi, err := j0.Build(ctx, g) + res, err := j0.Build(ctx, g) require.NoError(t, err) require.Equal(t, unwrapInt(res), 11) require.Equal(t, int64(7), *g.Vertex.(*vertexSum).cacheCallCount) require.Equal(t, int64(5), *g.Vertex.(*vertexSum).execCallCount) require.Equal(t, int64(0), cacheManager.loadCounter) - require.Equal(t, len(bi), 0) require.NoError(t, j0.Discard()) j0 = nil @@ -3221,13 +3162,12 @@ func TestCacheLoadError(t *testing.T) { g1.Vertex.(*vertexSum).setupCallCounters() - res, bi, err = j1.Build(ctx, g1) + res, err = j1.Build(ctx, g1) require.NoError(t, err) require.Equal(t, unwrapInt(res), 11) 
require.Equal(t, int64(7), *g.Vertex.(*vertexSum).cacheCallCount) require.Equal(t, int64(0), *g.Vertex.(*vertexSum).execCallCount) require.Equal(t, int64(1), cacheManager.loadCounter) - require.Equal(t, len(bi), 0) require.NoError(t, j1.Discard()) j1 = nil @@ -3248,13 +3188,12 @@ func TestCacheLoadError(t *testing.T) { cacheManager.forceFail = true - res, bi, err = j2.Build(ctx, g2) + res, err = j2.Build(ctx, g2) require.NoError(t, err) require.Equal(t, unwrapInt(res), 11) require.Equal(t, int64(7), *g.Vertex.(*vertexSum).cacheCallCount) require.Equal(t, int64(5), *g.Vertex.(*vertexSum).execCallCount) require.Equal(t, int64(6), cacheManager.loadCounter) - require.Equal(t, len(bi), 0) require.NoError(t, j2.Discard()) j2 = nil @@ -3302,7 +3241,7 @@ func TestInputRequestDeadlock(t *testing.T) { }), } - _, _, err = j0.Build(ctx, g0) + _, err = j0.Build(ctx, g0) require.NoError(t, err) require.NoError(t, j0.Discard()) j0 = nil @@ -3340,7 +3279,7 @@ func TestInputRequestDeadlock(t *testing.T) { }), } - _, _, err = j1.Build(ctx, g1) + _, err = j1.Build(ctx, g1) require.NoError(t, err) require.NoError(t, j1.Discard()) j1 = nil @@ -3381,7 +3320,7 @@ func TestInputRequestDeadlock(t *testing.T) { }), } - _, _, err = j2.Build(ctx, g2) + _, err = j2.Build(ctx, g2) require.NoError(t, err) require.NoError(t, j2.Discard()) j2 = nil @@ -3684,7 +3623,7 @@ func (v *vertexSubBuild) Exec(ctx context.Context, g session.Group, inputs []Res if err := v.exec(ctx, inputs); err != nil { return nil, err } - res, _, err := v.b.Build(ctx, v.g) + res, err := v.b.Build(ctx, v.g) if err != nil { return nil, err } diff --git a/solver/types.go b/solver/types.go index b62da7680bb3..6635daef0e65 100644 --- a/solver/types.go +++ b/solver/types.go @@ -72,11 +72,17 @@ type CachedResult interface { CacheKeys() []ExportableCacheKey } +type CachedResultWithProvenance interface { + CachedResult + WalkProvenance(context.Context, func(ProvenanceProvider) error) error +} + type ResultProxy interface { + ID() 
string Result(context.Context) (CachedResult, error) Release(context.Context) error Definition() *pb.Definition - BuildSources() BuildSources + Provenance() interface{} } // CacheExportMode is the type for setting cache exporting modes @@ -161,6 +167,10 @@ type Op interface { Acquire(ctx context.Context) (release ReleaseFunc, err error) } +type ProvenanceProvider interface { + IsProvenanceProvider() +} + type ResultBasedCacheFunc func(context.Context, Result, session.Group) (digest.Digest, error) type PreprocessFunc func(context.Context, Result, session.Group) error @@ -198,15 +208,8 @@ type CacheMap struct { // such as oci descriptor content providers and progress writers to be passed to // the cache. Opts should not have any impact on the computed cache key. Opts CacheOpts - - // BuildSources contains build dependencies that will be set from source - // operation. - BuildSources BuildSources } -// BuildSources contains solved build dependencies. -type BuildSources map[string]string - // ExportableCacheKey is a cache key connected with an exporter that can export // a chain of cacherecords pointing to that key type ExportableCacheKey struct { diff --git a/source/manager.go b/source/manager.go index 3f4a0cb4783d..6a9c831c9048 100644 --- a/source/manager.go +++ b/source/manager.go @@ -16,7 +16,7 @@ type Source interface { } type SourceInstance interface { - CacheKey(ctx context.Context, g session.Group, index int) (string, string, solver.CacheOpts, bool, error) + CacheKey(ctx context.Context, g session.Group, index int) (key, pin string, opts solver.CacheOpts, done bool, err error) Snapshot(ctx context.Context, g session.Group) (cache.ImmutableRef, error) } diff --git a/util/buildinfo/buildinfo.go b/util/buildinfo/buildinfo.go index a36b767ac154..64b9ea48e146 100644 --- a/util/buildinfo/buildinfo.go +++ b/util/buildinfo/buildinfo.go @@ -10,12 +10,70 @@ import ( ctnref "github.com/containerd/containerd/reference" "github.com/docker/distribution/reference" 
"github.com/moby/buildkit/exporter/containerimage/exptypes" + "github.com/moby/buildkit/solver/llbsolver/provenance" "github.com/moby/buildkit/source" binfotypes "github.com/moby/buildkit/util/buildinfo/types" "github.com/moby/buildkit/util/urlutil" "github.com/pkg/errors" ) +// BuildInfo format has been deprecated and will be removed in a future release. +// Use provenance attestations instead. + +func FromProvenance(c *provenance.Capture) (*binfotypes.BuildInfo, error) { + var bi binfotypes.BuildInfo + + bi.Frontend = c.Frontend + bi.Attrs = map[string]*string{} + for k, v := range c.Args { + v := v + bi.Attrs[k] = &v + } + + for _, s := range c.Sources.Images { + bi.Sources = append(bi.Sources, binfotypes.Source{ + Type: binfotypes.SourceTypeDockerImage, + Ref: s.Ref, + Pin: s.Digest.String(), + }) + } + + for _, s := range c.Sources.HTTP { + bi.Sources = append(bi.Sources, binfotypes.Source{ + Type: binfotypes.SourceTypeHTTP, + Ref: s.URL, + Pin: s.Digest.String(), + }) + } + + for _, s := range c.Sources.Git { + bi.Sources = append(bi.Sources, binfotypes.Source{ + Type: binfotypes.SourceTypeGit, + Ref: s.URL, + Pin: s.Commit, + }) + } + + sort.Slice(bi.Sources, func(i, j int) bool { + return bi.Sources[i].Ref < bi.Sources[j].Ref + }) + + return &bi, nil +} + +func AddMetadata(metadata map[string][]byte, key string, c *provenance.Capture) error { + bi, err := FromProvenance(c) + if err != nil { + return err + } + dt, err := json.Marshal(bi) + if err != nil { + return err + } + metadata[key] = dt + return nil +} + // Decode decodes a base64 encoded build info. func Decode(enc string) (bi binfotypes.BuildInfo, _ error) { dec, err := base64.StdEncoding.DecodeString(enc) @@ -377,49 +435,6 @@ func isControlArg(attrKey string) bool { return false } -// GetMetadata returns buildinfo metadata for the specified key. If the key -// is already there, result will be merged. 
-func GetMetadata(metadata map[string][]byte, key string, reqFrontend string, reqAttrs map[string]string) ([]byte, error) { - if metadata == nil { - metadata = make(map[string][]byte) - } - var dtbi []byte - if v, ok := metadata[key]; ok && v != nil { - var mbi binfotypes.BuildInfo - if errm := json.Unmarshal(v, &mbi); errm != nil { - return nil, errors.Wrapf(errm, "failed to unmarshal build info for %q", key) - } - if reqFrontend != "" { - mbi.Frontend = reqFrontend - } - if deps, err := decodeDeps(key, convertMap(reduceMapString(reqAttrs, mbi.Attrs))); err == nil { - mbi.Deps = reduceMapBuildInfo(deps, mbi.Deps) - } else { - return nil, err - } - mbi.Attrs = filterAttrs(key, convertMap(reduceMapString(reqAttrs, mbi.Attrs))) - var err error - dtbi, err = json.Marshal(mbi) - if err != nil { - return nil, errors.Wrapf(err, "failed to marshal build info for %q", key) - } - } else { - deps, err := decodeDeps(key, convertMap(reqAttrs)) - if err != nil { - return nil, err - } - dtbi, err = json.Marshal(binfotypes.BuildInfo{ - Frontend: reqFrontend, - Attrs: filterAttrs(key, convertMap(reqAttrs)), - Deps: deps, - }) - if err != nil { - return nil, errors.Wrapf(err, "failed to marshal build info for %q", key) - } - } - return dtbi, nil -} - func reduceMapString(m1 map[string]string, m2 map[string]*string) map[string]string { if m1 == nil && m2 == nil { return nil @@ -434,25 +449,3 @@ func reduceMapString(m1 map[string]string, m2 map[string]*string) map[string]str } return m1 } - -func reduceMapBuildInfo(m1 map[string]binfotypes.BuildInfo, m2 map[string]binfotypes.BuildInfo) map[string]binfotypes.BuildInfo { - if m1 == nil && m2 == nil { - return nil - } - if m1 == nil { - m1 = map[string]binfotypes.BuildInfo{} - } - for k, v := range m2 { - m1[k] = v - } - return m1 -} - -func convertMap(m map[string]string) map[string]*string { - res := make(map[string]*string) - for k, v := range m { - value := v - res[k] = &value - } - return res -} diff --git 
a/util/provenance/buildinfo.go b/util/provenance/buildinfo.go deleted file mode 100644 index 90dc8028777d..000000000000 --- a/util/provenance/buildinfo.go +++ /dev/null @@ -1,164 +0,0 @@ -package provenance - -import ( - "encoding/hex" - "strings" - - distreference "github.com/docker/distribution/reference" - slsa "github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v0.2" - binfotypes "github.com/moby/buildkit/util/buildinfo/types" - digest "github.com/opencontainers/go-digest" - ocispecs "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" -) - -var BuildKitBuildType = "https://mobyproject.org/buildkit@v1" - -type ProvenancePredicate struct { - slsa.ProvenancePredicate - Metadata *ProvenanceMetadata `json:"metadata,omitempty"` - Source *Source `json:"buildSource,omitempty"` - Layers map[string][][]ocispecs.Descriptor `json:"buildLayers,omitempty"` -} - -type ProvenanceMetadata struct { - slsa.ProvenanceMetadata - VCS map[string]string `json:"vcs,omitempty"` -} - -func convertMaterial(s binfotypes.Source) (*slsa.ProvenanceMaterial, error) { - // https://github.com/package-url/purl-spec/blob/master/PURL-TYPES.rst - switch s.Type { - case binfotypes.SourceTypeDockerImage: - dgst, err := digest.Parse(s.Pin) - if err != nil { - return nil, errors.Wrapf(err, "failed to parse digest %q for %s", s.Pin, s.Ref) - } - named, err := distreference.ParseNamed(s.Ref) - if err != nil { - return nil, errors.Wrapf(err, "failed to parse ref %q", s.Ref) - } - version := "" - if tagged, ok := named.(distreference.Tagged); ok { - version = tagged.Tag() - } else { - if canonical, ok := named.(distreference.Canonical); ok { - version = canonical.Digest().String() - } - } - uri := "pkg:docker/" + distreference.FamiliarName(named) - if version != "" { - uri += "@" + version - } - return &slsa.ProvenanceMaterial{ - URI: uri, - Digest: slsa.DigestSet{ - dgst.Algorithm().String(): dgst.Hex(), - }, - }, nil - case binfotypes.SourceTypeGit: - if _, err := 
hex.DecodeString(s.Pin); err != nil { - return nil, errors.Wrapf(err, "failed to parse commit %q for %s", s.Pin, s.Ref) - } - return &slsa.ProvenanceMaterial{ - URI: s.Ref, - Digest: slsa.DigestSet{ - "sha1": s.Pin, // TODO: check length? - }, - }, nil - case binfotypes.SourceTypeHTTP: - dgst, err := digest.Parse(s.Pin) - if err != nil { - return nil, errors.Wrapf(err, "failed to parse digest %q for %s", s.Pin, s.Ref) - } - return &slsa.ProvenanceMaterial{ - URI: s.Ref, - Digest: slsa.DigestSet{ - dgst.Algorithm().String(): dgst.Hex(), - }, - }, nil - default: - return nil, errors.Errorf("unsupported source type %q", s.Type) - } -} - -func findMaterial(srcs []binfotypes.Source, uri string) (*slsa.ProvenanceMaterial, bool) { - for _, s := range srcs { - if s.Ref == uri { - m, err := convertMaterial(s) - if err != nil { - continue - } - return m, true - } - } - return nil, false -} - -func FromBuildInfo(bi binfotypes.BuildInfo) (*ProvenancePredicate, error) { - materials := make([]slsa.ProvenanceMaterial, len(bi.Sources)) - for i, s := range bi.Sources { - m, err := convertMaterial(s) - if err != nil { - return nil, err - } - materials[i] = *m - } - - inv := slsa.ProvenanceInvocation{} - - contextKey := "context" - if v, ok := bi.Attrs["contextkey"]; ok && v != nil { - contextKey = *v - } - - if v, ok := bi.Attrs[contextKey]; ok && v != nil { - if m, ok := findMaterial(bi.Sources, *v); ok { - inv.ConfigSource.URI = m.URI - inv.ConfigSource.Digest = m.Digest - } else { - inv.ConfigSource.URI = *v - } - delete(bi.Attrs, contextKey) - } - - if v, ok := bi.Attrs["filename"]; ok && v != nil { - inv.ConfigSource.EntryPoint = *v - delete(bi.Attrs, "filename") - } - - vcs := make(map[string]string) - for k, v := range bi.Attrs { - if strings.HasPrefix(k, "vcs:") { - delete(bi.Attrs, k) - if v != nil { - vcs[strings.TrimPrefix(k, "vcs:")] = *v - } - } - } - - inv.Parameters = bi.Attrs - - pr := &ProvenancePredicate{ - ProvenancePredicate: slsa.ProvenancePredicate{ - 
BuildType: BuildKitBuildType, - Invocation: inv, - Materials: materials, - }, - Metadata: &ProvenanceMetadata{ - ProvenanceMetadata: slsa.ProvenanceMetadata{ - Completeness: slsa.ProvenanceComplete{ - Parameters: true, - Environment: true, - Materials: true, // TODO: check that there were no local sources - }, - }, - }, - } - - if len(vcs) > 0 { - pr.Metadata.VCS = vcs - } - - return pr, nil -} diff --git a/util/testutil/imageinfo.go b/util/testutil/imageinfo.go index 64e01dafa5b1..11cecc726792 100644 --- a/util/testutil/imageinfo.go +++ b/util/testutil/imageinfo.go @@ -43,6 +43,19 @@ func (idx ImagesInfo) Filter(platform string) *ImagesInfo { return result } +func (idx ImagesInfo) FindAttestation(platform string) *ImageInfo { + img := idx.Find(platform) + if img == nil { + return nil + } + for _, info := range idx.Images { + if info.Desc.Annotations["vnd.docker.reference.digest"] == string(img.Desc.Digest) { + return info + } + } + return nil +} + func ReadImages(ctx context.Context, p content.Provider, desc ocispecs.Descriptor) (*ImagesInfo, error) { idx := &ImagesInfo{Desc: desc} diff --git a/vendor/github.com/package-url/packageurl-go/.gitignore b/vendor/github.com/package-url/packageurl-go/.gitignore index d373807a9374..a1338d68517e 100644 --- a/vendor/github.com/package-url/packageurl-go/.gitignore +++ b/vendor/github.com/package-url/packageurl-go/.gitignore @@ -7,8 +7,6 @@ # Test binary, build with `go test -c` *.test -testdata/*json - # Output of the go coverage tool, specifically when used with LiteIDE *.out diff --git a/vendor/github.com/package-url/packageurl-go/.golangci.yaml b/vendor/github.com/package-url/packageurl-go/.golangci.yaml new file mode 100644 index 000000000000..73a5741c9270 --- /dev/null +++ b/vendor/github.com/package-url/packageurl-go/.golangci.yaml @@ -0,0 +1,17 @@ +# individual linter configs go here +linters-settings: + +# default linters are enabled `golangci-lint help linters` +linters: + disable-all: true + enable: + - deadcode + 
- errcheck + - gosimple + - govet + - ineffassign + - staticcheck + - structcheck + - typecheck + - unused + - varcheck \ No newline at end of file diff --git a/vendor/github.com/package-url/packageurl-go/.travis.yml b/vendor/github.com/package-url/packageurl-go/.travis.yml deleted file mode 100644 index 1bb07d03a3af..000000000000 --- a/vendor/github.com/package-url/packageurl-go/.travis.yml +++ /dev/null @@ -1,19 +0,0 @@ -language: go - -go: - - 1.12 - - tip - -install: true - -matrix: - allow_failures: - - go: tip - fast_finish: true - -script: - - make lint - - make test - -notifications: - email: false diff --git a/vendor/github.com/package-url/packageurl-go/mit.LICENSE b/vendor/github.com/package-url/packageurl-go/LICENSE similarity index 100% rename from vendor/github.com/package-url/packageurl-go/mit.LICENSE rename to vendor/github.com/package-url/packageurl-go/LICENSE diff --git a/vendor/github.com/package-url/packageurl-go/README.md b/vendor/github.com/package-url/packageurl-go/README.md index 68b42ac18e07..783985498b0b 100644 --- a/vendor/github.com/package-url/packageurl-go/README.md +++ b/vendor/github.com/package-url/packageurl-go/README.md @@ -1,8 +1,8 @@ # packageurl-go -Go implementation of the package url spec +[![build](https://github.com/package-url/packageurl-go/workflows/test/badge.svg)](https://github.com/package-url/packageurl-go/actions?query=workflow%3Atest) [![Coverage Status](https://coveralls.io/repos/github/package-url/packageurl-go/badge.svg)](https://coveralls.io/github/package-url/packageurl-go) [![PkgGoDev](https://pkg.go.dev/badge/github.com/package-url/packageurl-go)](https://pkg.go.dev/github.com/package-url/packageurl-go) [![Go Report Card](https://goreportcard.com/badge/github.com/package-url/packageurl-go)](https://goreportcard.com/report/github.com/package-url/packageurl-go) -[![Build Status](https://travis-ci.com/package-url/packageurl-go.svg)](https://travis-ci.com/package-url/packageurl-go) +Go implementation of the 
package url spec. ## Install @@ -55,7 +55,7 @@ func main() { ## Test -Testing using the normal ``go test`` command. Using ``make test`` will pull down the test fixtures shared between all package-url projects and then execute the tests. +Testing using the normal ``go test`` command. Using ``make test`` will pull the test fixtures shared between all package-url projects and then execute the tests. ``` $ make test diff --git a/vendor/github.com/package-url/packageurl-go/packageurl.go b/vendor/github.com/package-url/packageurl-go/packageurl.go index b521429f58d2..3cba7095d5f1 100644 --- a/vendor/github.com/package-url/packageurl-go/packageurl.go +++ b/vendor/github.com/package-url/packageurl-go/packageurl.go @@ -47,10 +47,20 @@ var ( var ( // TypeBitbucket is a pkg:bitbucket purl. TypeBitbucket = "bitbucket" + // TypeCocoapods is a pkg:cocoapods purl. + TypeCocoapods = "cocoapods" + // TypeCargo is a pkg:cargo purl. + TypeCargo = "cargo" // TypeComposer is a pkg:composer purl. TypeComposer = "composer" + // TypeConan is a pkg:conan purl. + TypeConan = "conan" + // TypeConda is a pkg:conda purl. + TypeConda = "conda" + // TypeCran is a pkg:cran purl. + TypeCran = "cran" // TypeDebian is a pkg:deb purl. - TypeDebian = "debian" + TypeDebian = "deb" // TypeDocker is a pkg:docker purl. TypeDocker = "docker" // TypeGem is a pkg:gem purl. @@ -61,16 +71,24 @@ var ( TypeGithub = "github" // TypeGolang is a pkg:golang purl. TypeGolang = "golang" + // TypeHackage is a pkg:hackage purl. + TypeHackage = "hackage" + // TypeHex is a pkg:hex purl. + TypeHex = "hex" // TypeMaven is a pkg:maven purl. TypeMaven = "maven" // TypeNPM is a pkg:npm purl. TypeNPM = "npm" // TypeNuget is a pkg:nuget purl. TypeNuget = "nuget" + // TypeOCI is a pkg:oci purl + TypeOCI = "oci" // TypePyPi is a pkg:pypi purl. TypePyPi = "pypi" // TypeRPM is a pkg:rpm purl. 
TypeRPM = "rpm" + // TypeSwift is pkg:swift purl + TypeSwift = "swift" ) // Qualifier represents a single key=value qualifier in the package url @@ -80,7 +98,7 @@ type Qualifier struct { } func (q Qualifier) String() string { - // A value must be must be a percent-encoded string + // A value must be a percent-encoded string return fmt.Sprintf("%s=%s", q.Key, url.PathEscape(q.Value)) } @@ -106,7 +124,7 @@ func QualifiersFromMap(mm map[string]string) Qualifiers { // Map converts a Qualifiers struct to a string map. func (qq Qualifiers) Map() map[string]string { - m := make(map[string]string, 0) + m := make(map[string]string) for i := 0; i < len(qq); i++ { k := qq[i].Key @@ -149,21 +167,22 @@ func NewPackageURL(purlType, namespace, name, version string, } } -// ToString returns the human readable instance of the PackageURL structure. +// ToString returns the human-readable instance of the PackageURL structure. // This is the literal purl as defined by the spec. func (p *PackageURL) ToString() string { // Start with the type and a colon purl := fmt.Sprintf("pkg:%s/", p.Type) // Add namespaces if provided if p.Namespace != "" { - ns := []string{} + var ns []string for _, item := range strings.Split(p.Namespace, "/") { ns = append(ns, url.QueryEscape(item)) } purl = purl + strings.Join(ns, "/") + "/" } // The name is always required and must be a percent-encoded string - purl = purl + url.PathEscape(p.Name) + // Use url.QueryEscape instead of PathEscape, as it handles @ signs + purl = purl + url.QueryEscape(p.Name) // If a version is provided, add it after the at symbol if p.Version != "" { // A name must be a percent-encoded string @@ -175,7 +194,7 @@ func (p *PackageURL) ToString() string { for _, q := range p.Qualifiers { qualifiers = append(qualifiers, q.String()) } - // If there one or more key=value pairs then append on the package url + // If there are one or more key=value pairs, append on the package url if len(qualifiers) != 0 { purl = purl + "?" 
+ strings.Join(qualifiers, "&") } @@ -186,7 +205,7 @@ func (p *PackageURL) ToString() string { return purl } -func (p *PackageURL) String() string { +func (p PackageURL) String() string { return p.ToString() } @@ -274,9 +293,14 @@ func FromString(purl string) (PackageURL, error) { return PackageURL{}, fmt.Errorf("failed to unescape purl version: %s", err) } version = v - name = name[:atIndex] + + unecapeName, err := url.PathUnescape(name[:atIndex]) + if err != nil { + return PackageURL{}, fmt.Errorf("failed to unescape purl name: %s", err) + } + name = unecapeName } - namespaces := []string{} + var namespaces []string if index != -1 { remainder = remainder[:index] @@ -299,6 +323,11 @@ func FromString(purl string) (PackageURL, error) { return PackageURL{}, errors.New("name is required") } + err := validCustomRules(purlType, name, namespace, version, qualifiers) + if err != nil { + return PackageURL{}, err + } + return PackageURL{ Type: purlType, Namespace: namespace, @@ -331,6 +360,43 @@ func typeAdjustName(purlType, name string) string { return name } +// validQualifierKey validates a qualifierKey against our QualifierKeyPattern. func validQualifierKey(key string) bool { return QualifierKeyPattern.MatchString(key) } + +// validCustomRules evaluates additional rules for each package url type, as specified in the package-url specification. +// On success, it returns nil. On failure, a descriptive error will be returned. 
+func validCustomRules(purlType, name, ns, version string, qualifiers Qualifiers) error { + q := qualifiers.Map() + switch purlType { + case TypeConan: + if ns != "" { + if val, ok := q["channel"]; ok { + if val == "" { + return errors.New("the qualifier channel must be not empty if namespace is present") + } + } else { + return errors.New("channel qualifier does not exist") + } + } else { + if val, ok := q["channel"]; ok { + if val != "" { + return errors.New("namespace is required if channel is non empty") + } + } + } + case TypeSwift: + if ns == "" { + return errors.New("namespace is required") + } + if version == "" { + return errors.New("version is required") + } + case TypeCran: + if version == "" { + return errors.New("version is required") + } + } + return nil +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 25e8d7a79682..b96f60c4061b 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -587,8 +587,8 @@ github.com/opencontainers/selinux/go-selinux github.com/opencontainers/selinux/go-selinux/label github.com/opencontainers/selinux/pkg/pwalk github.com/opencontainers/selinux/pkg/pwalkdir -# github.com/package-url/packageurl-go v0.1.0 -## explicit; go 1.12 +# github.com/package-url/packageurl-go v0.1.1-0.20220428063043-89078438f170 +## explicit; go 1.17 github.com/package-url/packageurl-go # github.com/pelletier/go-toml v1.9.4 ## explicit; go 1.12