diff --git a/go.mod b/go.mod
index ce64ff38d8..e12a604cee 100644
--- a/go.mod
+++ b/go.mod
@@ -5,7 +5,7 @@ go 1.24.0
 require (
 	github.com/client9/misspell v0.3.4
 	github.com/go-jose/go-jose/v3 v3.0.4
-	github.com/golangci/golangci-lint/v2 v2.6.2
+	github.com/golangci/golangci-lint/v2 v2.7.2
 	github.com/google/go-github/v67 v67.0.0
 	github.com/google/uuid v1.6.0
 	github.com/hashicorp/go-cty v1.5.0
@@ -32,7 +32,7 @@
 	github.com/BurntSushi/toml v1.5.0 // indirect
 	github.com/Djarvur/go-err113 v0.1.1 // indirect
 	github.com/Masterminds/semver/v3 v3.4.0 // indirect
-	github.com/MirrexOne/unqueryvet v1.2.1 // indirect
+	github.com/MirrexOne/unqueryvet v1.3.0 // indirect
 	github.com/OpenPeeDeeP/depguard/v2 v2.2.1 // indirect
 	github.com/ProtonMail/go-crypto v1.1.6 // indirect
 	github.com/agext/levenshtein v1.2.2 // indirect
@@ -56,7 +56,7 @@
 	github.com/breml/errchkjson v0.4.1 // indirect
 	github.com/butuzov/ireturn v0.4.0 // indirect
 	github.com/butuzov/mirror v1.3.0 // indirect
-	github.com/catenacyber/perfsprint v0.10.0 // indirect
+	github.com/catenacyber/perfsprint v0.10.1 // indirect
 	github.com/ccojocar/zxcvbn-go v1.0.4 // indirect
 	github.com/cespare/xxhash/v2 v2.3.0 // indirect
 	github.com/charithe/durationcheck v0.0.11 // indirect
@@ -91,7 +91,7 @@
 	github.com/go-viper/mapstructure/v2 v2.4.0 // indirect
 	github.com/go-xmlfmt/xmlfmt v1.1.3 // indirect
 	github.com/gobwas/glob v0.2.3 // indirect
-	github.com/godoc-lint/godoc-lint v0.10.1 // indirect
+	github.com/godoc-lint/godoc-lint v0.10.2 // indirect
 	github.com/gofrs/flock v0.13.0 // indirect
 	github.com/golang/protobuf v1.5.4 // indirect
 	github.com/golangci/asciicheck v0.5.0 // indirect
@@ -120,7 +120,7 @@
 	github.com/hashicorp/go-plugin v1.7.0 // indirect
 	github.com/hashicorp/go-retryablehttp v0.7.7 // indirect
 	github.com/hashicorp/go-uuid v1.0.3 // indirect
-	github.com/hashicorp/go-version v1.7.0 // indirect
+	github.com/hashicorp/go-version v1.8.0 // indirect
 	github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect
 	github.com/hashicorp/hc-install v0.9.2 // indirect
 	github.com/hashicorp/hcl v1.0.0 // indirect
@@ -163,7 +163,7 @@
 	github.com/mattn/go-isatty v0.0.20 // indirect
 	github.com/mattn/go-runewidth v0.0.16 // indirect
 	github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect
-	github.com/mgechev/revive v1.12.0 // indirect
+	github.com/mgechev/revive v1.13.0 // indirect
 	github.com/mitchellh/copystructure v1.2.0 // indirect
 	github.com/mitchellh/go-homedir v1.1.0 // indirect
 	github.com/mitchellh/go-testing-interface v1.14.1 // indirect
@@ -199,26 +199,26 @@
 	github.com/santhosh-tekuri/jsonschema/v6 v6.0.2 // indirect
 	github.com/sashamelentyev/interfacebloat v1.1.0 // indirect
 	github.com/sashamelentyev/usestdlibvars v1.29.0 // indirect
-	github.com/securego/gosec/v2 v2.22.10 // indirect
+	github.com/securego/gosec/v2 v2.22.11-0.20251204091113-daccba6b93d7 // indirect
 	github.com/shurcooL/graphql v0.0.0-20220606043923-3cf50f8a0a29 // indirect
 	github.com/sirupsen/logrus v1.9.3 // indirect
 	github.com/sivchari/containedctx v1.0.3 // indirect
 	github.com/sonatard/noctx v0.4.0 // indirect
 	github.com/sourcegraph/go-diff v0.7.0 // indirect
-	github.com/spf13/afero v1.14.0 // indirect
+	github.com/spf13/afero v1.15.0 // indirect
 	github.com/spf13/cast v1.5.0 // indirect
-	github.com/spf13/cobra v1.10.1 // indirect
+	github.com/spf13/cobra v1.10.2 // indirect
 	github.com/spf13/jwalterweatherman v1.1.0 // indirect
 	github.com/spf13/pflag v1.0.10 // indirect
 	github.com/spf13/viper v1.12.0 // indirect
 	github.com/ssgreg/nlreturn/v2 v2.2.1 // indirect
-	github.com/stbenjam/no-sprintf-host-port v0.2.0 // indirect
+	github.com/stbenjam/no-sprintf-host-port v0.3.1 // indirect
 	github.com/stretchr/objx v0.5.2 // indirect
 	github.com/subosito/gotenv v1.4.1 // indirect
 	github.com/tetafro/godot v1.5.4 // indirect
 	github.com/timakin/bodyclose v0.0.0-20241222091800-1db5c5ca4d67 // indirect
 	github.com/timonwong/loggercheck v0.11.0 // indirect
-	github.com/tomarrell/wrapcheck/v2 v2.11.0 // indirect
+	github.com/tomarrell/wrapcheck/v2 v2.12.0 // indirect
 	github.com/tommy-muehle/go-mnd/v2 v2.5.1 // indirect
 	github.com/ultraware/funlen v0.2.0 // indirect
 	github.com/ultraware/whitespace v0.2.0 // indirect
@@ -241,13 +241,14 @@
 	go.uber.org/automaxprocs v1.6.0 // indirect
 	go.uber.org/multierr v1.10.0 // indirect
 	go.uber.org/zap v1.27.0 // indirect
+	go.yaml.in/yaml/v3 v3.0.4 // indirect
 	golang.org/x/exp/typeparams v0.0.0-20251023183803-a4bb9ffd2546 // indirect
-	golang.org/x/mod v0.29.0 // indirect
+	golang.org/x/mod v0.30.0 // indirect
 	golang.org/x/net v0.47.0 // indirect
 	golang.org/x/sync v0.18.0 // indirect
 	golang.org/x/sys v0.38.0 // indirect
 	golang.org/x/text v0.31.0 // indirect
-	golang.org/x/tools v0.38.0 // indirect
+	golang.org/x/tools v0.39.0 // indirect
 	google.golang.org/appengine v1.6.8 // indirect
 	google.golang.org/genproto/googleapis/rpc v0.0.0-20250818200422-3122310a409c // indirect
 	google.golang.org/grpc v1.75.1 // indirect
diff --git a/go.sum b/go.sum
index 3e0e41bd61..5d993e536c 100644
--- a/go.sum
+++ b/go.sum
@@ -67,8 +67,8 @@ github.com/Masterminds/semver/v3 v3.4.0 h1:Zog+i5UMtVoCU8oKka5P7i9q9HgrJeGzI9SA1
 github.com/Masterminds/semver/v3 v3.4.0/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM=
 github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY=
 github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU=
-github.com/MirrexOne/unqueryvet v1.2.1 h1:M+zdXMq84g+E1YOLa7g7ExN3dWfZQrdDSTCM7gC+m/A=
-github.com/MirrexOne/unqueryvet v1.2.1/go.mod h1:IWwCwMQlSWjAIteW0t+28Q5vouyktfujzYznSIWiuOg=
+github.com/MirrexOne/unqueryvet v1.3.0 h1:5slWSomgqpYU4zFuZ3NNOfOUxVPlXFDBPAVasZOGlAY=
+github.com/MirrexOne/unqueryvet v1.3.0/go.mod h1:IWwCwMQlSWjAIteW0t+28Q5vouyktfujzYznSIWiuOg=
 github.com/OpenPeeDeeP/depguard/v2 v2.2.1 h1:vckeWVESWp6Qog7UZSARNqfu/cZqvki8zsuj3piCMx4=
 github.com/OpenPeeDeeP/depguard/v2 v2.2.1/go.mod h1:q4DKzC4UcVaAvcfd41CZh0PWpGgzrVxUYBlgKNGquUo=
 github.com/ProtonMail/go-crypto v1.1.6 h1:ZcV+Ropw6Qn0AX9brlQLAUXfqLBc7Bl+f/DmNxpLfdw=
@@ -129,8 +129,8 @@ github.com/butuzov/ireturn v0.4.0 h1:+s76bF/PfeKEdbG8b54aCocxXmi0wvYdOVsWxVO7n8E
 github.com/butuzov/ireturn v0.4.0/go.mod h1:ghI0FrCmap8pDWZwfPisFD1vEc56VKH4NpQUxDHta70=
 github.com/butuzov/mirror v1.3.0 h1:HdWCXzmwlQHdVhwvsfBb2Au0r3HyINry3bDWLYXiKoc=
 github.com/butuzov/mirror v1.3.0/go.mod h1:AEij0Z8YMALaq4yQj9CPPVYOyJQyiexpQEQgihajRfI=
-github.com/catenacyber/perfsprint v0.10.0 h1:AZj1mYyxbxLRqmnYOeguZXEQwWOgQGm2wzLI5d7Hl/0=
-github.com/catenacyber/perfsprint v0.10.0/go.mod h1:DJTGsi/Zufpuus6XPGJyKOTMELe347o6akPvWG9Zcsc=
+github.com/catenacyber/perfsprint v0.10.1 h1:u7Riei30bk46XsG8nknMhKLXG9BcXz3+3tl/WpKm0PQ=
+github.com/catenacyber/perfsprint v0.10.1/go.mod h1:DJTGsi/Zufpuus6XPGJyKOTMELe347o6akPvWG9Zcsc=
 github.com/ccojocar/zxcvbn-go v1.0.4 h1:FWnCIRMXPj43ukfX000kvBZvV6raSxakYr1nzyNrUcc=
 github.com/ccojocar/zxcvbn-go v1.0.4/go.mod h1:3GxGX+rHmueTUMvm5ium7irpyjmm7ikxYFOSJB21Das=
 github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
@@ -257,8 +257,8 @@ github.com/go-xmlfmt/xmlfmt v1.1.3 h1:t8Ey3Uy7jDSEisW2K3somuMKIpzktkWptA0iFCnRUW
 github.com/go-xmlfmt/xmlfmt v1.1.3/go.mod h1:aUCEOzzezBEjDBbFBoSiya/gduyIiWYRP6CnSFIV8AM=
 github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y=
 github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8=
-github.com/godoc-lint/godoc-lint v0.10.1 h1:ZPUVzlDtJfA+P688JfPJPkI/SuzcBr/753yGIk5bOPA=
-github.com/godoc-lint/godoc-lint v0.10.1/go.mod h1:KleLcHu/CGSvkjUH2RvZyoK1MBC7pDQg4NxMYLcBBsw=
+github.com/godoc-lint/godoc-lint v0.10.2 h1:dksNgK+zebnVlj4Fx83CRnCmPO0qRat/9xfFsir1nfg=
+github.com/godoc-lint/godoc-lint v0.10.2/go.mod h1:KleLcHu/CGSvkjUH2RvZyoK1MBC7pDQg4NxMYLcBBsw=
 github.com/gofrs/flock v0.13.0 h1:95JolYOvGMqeH31+FC7D2+uULf6mG61mEZ/A8dRYMzw=
 github.com/gofrs/flock v0.13.0/go.mod h1:jxeyy9R1auM5S6JYDBhDt+E2TCo7DkratH4Pgi8P+Z0=
 github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
@@ -302,8 +302,8 @@ github.com/golangci/go-printf-func-name v0.1.1 h1:hIYTFJqAGp1iwoIfsNTpoq1xZAarog
 github.com/golangci/go-printf-func-name v0.1.1/go.mod h1:Es64MpWEZbh0UBtTAICOZiB+miW53w/K9Or/4QogJss=
 github.com/golangci/gofmt v0.0.0-20250106114630-d62b90e6713d h1:viFft9sS/dxoYY0aiOTsLKO2aZQAPT4nlQCsimGcSGE=
 github.com/golangci/gofmt v0.0.0-20250106114630-d62b90e6713d/go.mod h1:ivJ9QDg0XucIkmwhzCDsqcnxxlDStoTl89jDMIoNxKY=
-github.com/golangci/golangci-lint/v2 v2.6.2 h1:jkMSVv36JmyTENcEertckvimvjPcD5qxNM7W7qhECvI=
-github.com/golangci/golangci-lint/v2 v2.6.2/go.mod h1:fSIMDiBt9kzdpnvvV7GO6iWzyv5uaeZ+iPor+2uRczE=
+github.com/golangci/golangci-lint/v2 v2.7.2 h1:AhBC+YeEueec4AGlIbvPym5C70Thx0JykIqXbdIXWx0=
+github.com/golangci/golangci-lint/v2 v2.7.2/go.mod h1:pDijleoBu7e8sejMqyZ3L5n6geqe+cVvOAz2QImqqVc=
 github.com/golangci/golines v0.0.0-20250217134842-442fd0091d95 h1:AkK+w9FZBXlU/xUmBtSJN1+tAI4FIvy5WtnUnY8e4p8=
 github.com/golangci/golines v0.0.0-20250217134842-442fd0091d95/go.mod h1:k9mmcyWKSTMcPPvQUCfRWWQ9VHJ1U9Dc0R7kaXAgtnQ=
 github.com/golangci/misspell v0.7.0 h1:4GOHr/T1lTW0hhR4tgaaV1WS/lJ+ncvYCoFKmqJsj0c=
@@ -390,8 +390,8 @@ github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/b
 github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8=
 github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
 github.com/hashicorp/go-version v1.2.1/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
-github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKeRZfjY=
-github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
+github.com/hashicorp/go-version v1.8.0 h1:KAkNb1HAiZd1ukkxDFGmokVZe1Xy9HG6NUp+bPle2i4=
+github.com/hashicorp/go-version v1.8.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
 github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
 github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
 github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k=
@@ -513,8 +513,8 @@ github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6T
 github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
 github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
 github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
-github.com/mgechev/revive v1.12.0 h1:Q+/kkbbwerrVYPv9d9efaPGmAO/NsxwW/nE6ahpQaCU=
-github.com/mgechev/revive v1.12.0/go.mod h1:VXsY2LsTigk8XU9BpZauVLjVrhICMOV3k1lpB3CXrp8=
+github.com/mgechev/revive v1.13.0 h1:yFbEVliCVKRXY8UgwEO7EOYNopvjb1BFbmYqm9hZjBM=
+github.com/mgechev/revive v1.13.0/go.mod h1:efJfeBVCX2JUumNQ7dtOLDja+QKj9mYGgEZA7rt5u+0=
 github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw=
 github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s=
 github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
@@ -548,8 +548,8 @@ github.com/nunnatsa/ginkgolinter v0.21.2 h1:khzWfm2/Br8ZemX8QM1pl72LwM+rMeW6VUbQ
 github.com/nunnatsa/ginkgolinter v0.21.2/go.mod h1:GItSI5fw7mCGLPmkvGYrr1kEetZe7B593jcyOpyabsY=
 github.com/oklog/run v1.1.0 h1:GEenZ1cK0+q0+wsJew9qUg/DyD8k3JzYsZAi5gYi2mA=
 github.com/oklog/run v1.1.0/go.mod h1:sVPdnTZT1zYwAJeCMu2Th4T21pA3FPOQRfWjQlk7DVU=
-github.com/onsi/ginkgo/v2 v2.26.0 h1:1J4Wut1IlYZNEAWIV3ALrT9NfiaGW2cDCJQSFQMs/gE=
-github.com/onsi/ginkgo/v2 v2.26.0/go.mod h1:qhEywmzWTBUY88kfO0BRvX4py7scov9yR+Az2oavUzw=
+github.com/onsi/ginkgo/v2 v2.27.2 h1:LzwLj0b89qtIy6SSASkzlNvX6WktqurSHwkk2ipF/Ns=
+github.com/onsi/ginkgo/v2 v2.27.2/go.mod h1:ArE1D/XhNXBXCBkKOLkbsb2c81dQHCRcF5zwn/ykDRo=
 github.com/onsi/gomega v1.38.2 h1:eZCjf2xjZAqe+LeWvKb5weQ+NcPwX84kqJ0cZNxok2A=
 github.com/onsi/gomega v1.38.2/go.mod h1:W2MJcYxRGV63b418Ai34Ud0hEdTVXq9NW9+Sx6uXf3k=
 github.com/otiai10/copy v1.2.0/go.mod h1:rrF5dJ5F0t/EWSYODDu4j9/vEeYHMkc8jt0zJChqQWw=
@@ -629,8 +629,8 @@ github.com/sashamelentyev/interfacebloat v1.1.0 h1:xdRdJp0irL086OyW1H/RTZTr1h/tM
 github.com/sashamelentyev/interfacebloat v1.1.0/go.mod h1:+Y9yU5YdTkrNvoX0xHc84dxiN1iBi9+G8zZIhPVoNjQ=
 github.com/sashamelentyev/usestdlibvars v1.29.0 h1:8J0MoRrw4/NAXtjQqTHrbW9NN+3iMf7Knkq057v4XOQ=
 github.com/sashamelentyev/usestdlibvars v1.29.0/go.mod h1:8PpnjHMk5VdeWlVb4wCdrB8PNbLqZ3wBZTZWkrpZZL8=
-github.com/securego/gosec/v2 v2.22.10 h1:ntbBqdWXnu46DUOXn+R2SvPo3PiJCDugTCgTW2g4tQg=
-github.com/securego/gosec/v2 v2.22.10/go.mod h1:9UNjK3tLpv/w2b0+7r82byV43wCJDNtEDQMeS+H/g2w=
+github.com/securego/gosec/v2 v2.22.11-0.20251204091113-daccba6b93d7 h1:rZg6IGn0ySYZwCX8LHwZoYm03JhG/cVAJJ3O+u3Vclo=
+github.com/securego/gosec/v2 v2.22.11-0.20251204091113-daccba6b93d7/go.mod h1:9sr22NZO5Kfh7unW/xZxkGYTmj2484/fCiE54gw7UTY=
 github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 h1:n661drycOFuPLCN3Uc8sB6B/s6Z4t2xvBgU1htSHuq8=
 github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3/go.mod h1:A0bzQcvG0E7Rwjx0REVgAGH58e96+X0MeOfepqsbeW4=
 github.com/shurcooL/githubv4 v0.0.0-20221126192849-0b5c4c7994eb h1:foJysa74+t41fG7adnt+TkfcNxQUWid8R/HlXe+Mmbw=
@@ -652,12 +652,12 @@ github.com/sonatard/noctx v0.4.0 h1:7MC/5Gg4SQ4lhLYR6mvOP6mQVSxCrdyiExo7atBs27o=
 github.com/sonatard/noctx v0.4.0/go.mod h1:64XdbzFb18XL4LporKXp8poqZtPKbCrqQ402CV+kJas=
 github.com/sourcegraph/go-diff v0.7.0 h1:9uLlrd5T46OXs5qpp8L/MTltk0zikUGi0sNNyCpA8G0=
 github.com/sourcegraph/go-diff v0.7.0/go.mod h1:iBszgVvyxdc8SFZ7gm69go2KDdt3ag071iBaWPF6cjs=
-github.com/spf13/afero v1.14.0 h1:9tH6MapGnn/j0eb0yIXiLjERO8RB6xIVZRDCX7PtqWA=
-github.com/spf13/afero v1.14.0/go.mod h1:acJQ8t0ohCGuMN3O+Pv0V0hgMxNYDlvdk+VTfyZmbYo=
+github.com/spf13/afero v1.15.0 h1:b/YBCLWAJdFWJTN9cLhiXXcD7mzKn9Dm86dNnfyQw1I=
+github.com/spf13/afero v1.15.0/go.mod h1:NC2ByUVxtQs4b3sIUphxK0NioZnmxgyCrfzeuq8lxMg=
 github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w=
 github.com/spf13/cast v1.5.0/go.mod h1:SpXXQ5YoyJw6s3/6cMTQuxvgRl3PCJiyaX9p6b155UU=
-github.com/spf13/cobra v1.10.1 h1:lJeBwCfmrnXthfAupyUTzJ/J4Nc1RsHC/mSRU2dll/s=
-github.com/spf13/cobra v1.10.1/go.mod h1:7SmJGaTHFVBY0jW4NXGluQoLvhqFQM+6XSKD+P4XaB0=
+github.com/spf13/cobra v1.10.2 h1:DMTTonx5m65Ic0GOoRY2c16WCbHxOOw6xxezuLaBpcU=
+github.com/spf13/cobra v1.10.2/go.mod h1:7C1pvHqHw5A4vrJfjNwvOdzYu0Gml16OCs2GRiTUUS4=
 github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk=
 github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo=
 github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
@@ -668,8 +668,8 @@ github.com/spf13/viper v1.12.0 h1:CZ7eSOd3kZoaYDLbXnmzgQI5RlciuXBMA+18HwHRfZQ=
 github.com/spf13/viper v1.12.0/go.mod h1:b6COn30jlNxbm/V2IqWiNWkJ+vZNiMNksliPCiuKtSI=
 github.com/ssgreg/nlreturn/v2 v2.2.1 h1:X4XDI7jstt3ySqGU86YGAURbxw3oTDPK9sPEi6YEwQ0=
 github.com/ssgreg/nlreturn/v2 v2.2.1/go.mod h1:E/iiPB78hV7Szg2YfRgyIrk1AD6JVMTRkkxBiELzh2I=
-github.com/stbenjam/no-sprintf-host-port v0.2.0 h1:i8pxvGrt1+4G0czLr/WnmyH7zbZ8Bg8etvARQ1rpyl4=
-github.com/stbenjam/no-sprintf-host-port v0.2.0/go.mod h1:eL0bQ9PasS0hsyTyfTjjG+E80QIyPnBVQbYZyv20Jfk=
+github.com/stbenjam/no-sprintf-host-port v0.3.1 h1:AyX7+dxI4IdLBPtDbsGAyqiTSLpCP9hWRrXQDU4Cm/g=
+github.com/stbenjam/no-sprintf-host-port v0.3.1/go.mod h1:ODbZesTCHMVKthBHskvUUexdcNHAQRXk9NpSsL8p/HQ=
 github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
 github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
 github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
@@ -693,8 +693,8 @@ github.com/timakin/bodyclose v0.0.0-20241222091800-1db5c5ca4d67 h1:9LPGD+jzxMlnk
 github.com/timakin/bodyclose v0.0.0-20241222091800-1db5c5ca4d67/go.mod h1:mkjARE7Yr8qU23YcGMSALbIxTQ9r9QBVahQOBRfU460=
 github.com/timonwong/loggercheck v0.11.0 h1:jdaMpYBl+Uq9mWPXv1r8jc5fC3gyXx4/WGwTnnNKn4M=
 github.com/timonwong/loggercheck v0.11.0/go.mod h1:HEAWU8djynujaAVX7QI65Myb8qgfcZ1uKbdpg3ZzKl8=
-github.com/tomarrell/wrapcheck/v2 v2.11.0 h1:BJSt36snX9+4WTIXeJ7nvHBQBcm1h2SjQMSlmQ6aFSU=
-github.com/tomarrell/wrapcheck/v2 v2.11.0/go.mod h1:wFL9pDWDAbXhhPZZt+nG8Fu+h29TtnZ2MW6Lx4BRXIU=
+github.com/tomarrell/wrapcheck/v2 v2.12.0 h1:H/qQ1aNWz/eeIhxKAFvkfIA+N7YDvq6TWVFL27Of9is=
+github.com/tomarrell/wrapcheck/v2 v2.12.0/go.mod h1:AQhQuZd0p7b6rfW+vUwHm5OMCGgp63moQ9Qr/0BpIWo=
 github.com/tommy-muehle/go-mnd/v2 v2.5.1 h1:NowYhSdyE/1zwK9QCLeRb6USWdoif80Ie+v+yU8u1Zw=
 github.com/tommy-muehle/go-mnd/v2 v2.5.1/go.mod h1:WsUAkMJMYww6l/ufffCD3m+P7LEvr8TnZn9lwVDlgzw=
 github.com/ultraware/funlen v0.2.0 h1:gCHmCn+d2/1SemTdYMiKLAHFYxTYz7z9VIDRaTGyLkI=
@@ -829,8 +829,8 @@ golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91
 golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
 golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
 golang.org/x/mod v0.13.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
-golang.org/x/mod v0.29.0 h1:HV8lRxZC4l2cr3Zq1LvtOsi/ThTgWnUk/y64QSs8GwA=
-golang.org/x/mod v0.29.0/go.mod h1:NyhrlYXJ2H4eJiRy/WDBO6HMqZQ6q9nk4JzS3NuCK+w=
+golang.org/x/mod v0.30.0 h1:fDEXFVZ/fmCKProc/yAXXUijritrDzahmwwefnjoPFk=
+golang.org/x/mod v0.30.0/go.mod h1:lAsf5O2EvJeSFMiBxXDki7sCgAxEUcZHXoXMKT4GJKc=
 golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -1030,8 +1030,8 @@ golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc
 golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
 golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58=
 golang.org/x/tools v0.14.0/go.mod h1:uYBEerGOWcJyEORxN+Ek8+TT266gXkNlHdJBwexUsBg=
-golang.org/x/tools v0.38.0 h1:Hx2Xv8hISq8Lm16jvBZ2VQf+RLmbd7wVUsALibYI/IQ=
-golang.org/x/tools v0.38.0/go.mod h1:yEsQ/d/YK8cjh0L6rZlY8tgtlKiBNTL14pGDJPJpYQs=
+golang.org/x/tools v0.39.0 h1:ik4ho21kwuQln40uelmciQPp9SipgNDdrafrYA4TmQQ=
+golang.org/x/tools v0.39.0/go.mod h1:JnefbkDPyD8UU2kI5fuf8ZX4/yUeh9W877ZeBONxUqQ=
 golang.org/x/tools/go/expect v0.1.1-deprecated h1:jpBZDwmgPhXsKZC6WhL20P4b/wmnpsEAGHaNy0n/rJM=
 golang.org/x/tools/go/expect v0.1.1-deprecated/go.mod h1:eihoPOH+FgIqa3FpoTwguz/bVUSGBlGQU67vpBeOrBY=
 golang.org/x/tools/go/packages/packagestest v0.1.1-deprecated h1:1h2MnaIAIXISqTFKdENegdpAgUXz6NrPEsbIeWaBRvM=
diff --git a/vendor/github.com/MirrexOne/unqueryvet/.golangci.yml b/vendor/github.com/MirrexOne/unqueryvet/.golangci.yml
new file mode 100644
index 0000000000..d604d323fd
--- /dev/null
+++ b/vendor/github.com/MirrexOne/unqueryvet/.golangci.yml
@@ -0,0 +1,20 @@
+version: "2"
+
+formatters:
+  enable:
+    - gofumpt
+    - goimports
+  settings:
+    gofumpt:
+      extra-rules: true
+
+linters:
+  exclusions:
+    warn-unused: true
+    presets:
+      - comments
+      - std-error-handling
+
+issues:
+  max-issues-per-linter: 0
+  max-same-issues: 0
diff --git a/vendor/github.com/MirrexOne/unqueryvet/README.md b/vendor/github.com/MirrexOne/unqueryvet/README.md
index 6c00223f74..407f9d289a 100644
--- a/vendor/github.com/MirrexOne/unqueryvet/README.md
+++ b/vendor/github.com/MirrexOne/unqueryvet/README.md
@@ -9,6 +9,7 @@ unqueryvet is a Go static analysis tool (linter) that detects `SELECT *` usage i
 ## Features
 
 - **Detects `SELECT *` in string literals** - Finds problematic queries in your Go code
+- **Constants and variables support** - Detects `SELECT *` in const and var declarations
 - **SQL Builder support** - Works with popular SQL builders like Squirrel, GORM, etc.
 - **Highly configurable** - Extensive configuration options for different use cases
 - **Supports `//nolint:unqueryvet`** - Standard Go linting suppression
@@ -55,20 +56,22 @@ unqueryvet ./...
 Add to your `.golangci.yml`:
 
 ```yaml
+version: "2"
+
 linters:
   enable:
     - unqueryvet
 
-linters-settings:
-  unqueryvet:
-    check-sql-builders: true
-    # By default, no functions are ignored - minimal configuration
-    # ignored-functions:
-    #   - "fmt.Printf"
-    #   - "log.Printf"
-    # allowed-patterns:
-    #   - "SELECT \\* FROM information_schema\\..*"
-    #   - "SELECT \\* FROM pg_catalog\\..*"
+  settings:
+    unqueryvet:
+      check-sql-builders: true
+      # By default, no functions are ignored - minimal configuration
+      # ignored-functions:
+      #   - "fmt.Printf"
+      #   - "log.Printf"
+      # allowed-patterns:
+      #   - "SELECT \\* FROM information_schema\\..*"
+      #   - "SELECT \\* FROM pg_catalog\\..*"
 ```
 
 ## Examples
@@ -76,6 +79,12 @@ linters-settings:
 ### Problematic code (will trigger warnings)
 
 ```go
+// Constants with SELECT *
+const QueryUsers = "SELECT * FROM users"
+
+// Variables with SELECT *
+var QueryOrders = "SELECT * FROM orders"
+
 // String literals with SELECT *
 query := "SELECT * FROM users"
 rows, err := db.Query("SELECT * FROM orders WHERE status = ?", "active")
@@ -88,7 +97,13 @@ query := builder.Select().Columns("*").From("inventory")
 ### Good code (recommended)
 
 ```go
-// Explicit column selection
+// Constants with explicit columns
+const QueryUsers = "SELECT id, name, email FROM users"
+
+// Variables with explicit columns
+var QueryOrders = "SELECT id, status, total FROM orders"
+
+// String literals with explicit column selection
 query := "SELECT id, name, email FROM users"
 rows, err := db.Query("SELECT id, total FROM orders WHERE status = ?", "active")
 
@@ -117,40 +132,20 @@ query := "SELECT * FROM debug_table" //nolint:unqueryvet
 Unqueryvet is highly configurable to fit your project's needs:
 
 ```yaml
-linters-settings:
-  unqueryvet:
-    # Enable/disable SQL builder checking (default: true)
-    check-sql-builders: true
-
-    # Optional: Functions to ignore during analysis (empty by default - minimal config)
-    # ignored-functions:
-    #   - "fmt.Printf"
-    #   - "log.Printf"
-    #   - "debug.Query"
-
-    # Optional: Packages to ignore completely (empty by default)
-    # ignored-packages:
-    #   - "testing"
-    #   - "debug"
-
-    # Default allowed patterns (automatically included):
-    # - COUNT(*), MAX(*), MIN(*) functions
-    # - information_schema, pg_catalog, sys schema queries
-    # You can add more patterns if needed:
-    # allowed-patterns:
-    #   - "SELECT \\* FROM temp_.*"
-
-    # Default ignored file patterns (automatically included):
-    # *_test.go, *.pb.go, *_gen.go, *.gen.go, *_generated.go
-    # You can add more patterns if needed:
-    # ignored-file-patterns:
-    #   - "my_special_pattern.go"
+version: "2"
+
+linters:
+  settings:
+    unqueryvet:
+      # Enable/disable SQL builder checking (default: true)
+      check-sql-builders: true
 
-    # Default ignored directories (automatically included):
-    # vendor, testdata, migrations, generated, .git, node_modules
-    # You can add more directories if needed:
-    # ignored-directories:
-    #   - "my_special_dir"
+      # Default allowed patterns (automatically included):
+      # - COUNT(*), MAX(*), MIN(*) functions
+      # - information_schema, pg_catalog, sys schema queries
+      # You can add more patterns if needed:
+      # allowed-patterns:
+      #   - "SELECT \\* FROM temp_.*"
 ```
 
 ## Supported SQL Builders
@@ -173,12 +168,10 @@ jobs:
   lint:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v4
-      - uses: actions/setup-go@v5
-        with:
-          go-version: 1.23
+      - uses: actions/checkout@v5
+      - uses: actions/setup-go@v6
       - name: golangci-lint
-        uses: golangci/golangci-lint-action@v6
+        uses: golangci/golangci-lint-action@v8
        with:
           version: latest
           args: --enable unqueryvet
@@ -206,10 +199,10 @@ unqueryvet -v ./...
 
 Unqueryvet is designed to be fast and lightweight:
 
-- **Parallel processing** - Analyzes multiple files concurrently
-- **Incremental analysis** - Only analyzes changed files when possible
-- **Minimal memory footprint** - Efficient AST traversal
-- **Smart caching** - Reuses analysis results when appropriate
+- **Parallel processing**: Analyzes multiple files concurrently
+- **Incremental analysis**: Only analyzes changed files when possible
+- **Minimal memory footprint**: Efficient AST traversal
+- **Smart caching**: Reuses analysis results when appropriate
 
 ## Advanced Usage
 
@@ -265,5 +258,3 @@ MIT License - see [LICENSE](LICENSE) file for details.
 ## Support
 
 - **Bug Reports**: [GitHub Issues](https://github.com/MirrexOne/unqueryvet/issues)
-
----
diff --git a/vendor/github.com/MirrexOne/unqueryvet/internal/analyzer/analyzer.go b/vendor/github.com/MirrexOne/unqueryvet/internal/analyzer/analyzer.go
index 1627ee144a..ce9b9874c5 100644
--- a/vendor/github.com/MirrexOne/unqueryvet/internal/analyzer/analyzer.go
+++ b/vendor/github.com/MirrexOne/unqueryvet/internal/analyzer/analyzer.go
@@ -64,11 +64,11 @@ func RunWithConfig(pass *analysis.Pass, cfg *config.UnqueryvetSettings) (any, er
 		(*ast.CallExpr)(nil),   // Function/method calls
 		(*ast.File)(nil),       // Files (for SQL builder analysis)
 		(*ast.AssignStmt)(nil), // Assignment statements for standalone literals
+		(*ast.GenDecl)(nil),    // General declarations (const, var, type)
 	}
 
 	// Walk through all AST nodes and analyze them
 	insp.Preorder(nodeFilter, func(n ast.Node) {
-
 		switch node := n.(type) {
 		case *ast.File:
 			// Analyze SQL builders only if enabled in configuration
@@ -78,6 +78,9 @@ func RunWithConfig(pass *analysis.Pass, cfg *config.UnqueryvetSettings) (any, er
 		case *ast.AssignStmt:
 			// Check assignment statements for standalone SQL literals
 			checkAssignStmt(pass, node, cfg)
+		case *ast.GenDecl:
+			// Check constant and variable declarations
+			checkGenDecl(pass, node, cfg)
 		case *ast.CallExpr:
 			// Analyze function calls for SQL with SELECT * usage
 			checkCallExpr(pass, node, cfg)
@@ -96,6 +99,7 @@ func run(pass *analysis.Pass) (any, error) {
 		(*ast.CallExpr)(nil),   // Function/method calls
 		(*ast.File)(nil),       // Files (for SQL builder analysis)
 		(*ast.AssignStmt)(nil), // Assignment statements for standalone literals
+		(*ast.GenDecl)(nil),    // General declarations (const, var)
 	}
 
 	// Always use default settings since passing settings through ResultOf doesn't work reliably
@@ -104,7 +108,6 @@ func run(pass *analysis.Pass) (any, error) {
 
 	// Walk through all AST nodes and analyze them
 	insp.Preorder(nodeFilter, func(n ast.Node) {
-
 		switch node := n.(type) {
 		case *ast.File:
 			// Analyze SQL builders only if enabled in configuration
@@ -114,6 +117,9 @@ func run(pass *analysis.Pass) (any, error) {
 		case *ast.AssignStmt:
 			// Check assignment statements for standalone SQL literals
 			checkAssignStmt(pass, node, cfg)
+		case *ast.GenDecl:
+			// Check constant and variable declarations
+			checkGenDecl(pass, node, cfg)
 		case *ast.CallExpr:
 			// Analyze function calls for SQL with SELECT * usage
 			checkCallExpr(pass, node, cfg)
@@ -140,10 +146,40 @@ func checkAssignStmt(pass *analysis.Pass, stmt *ast.AssignStmt, cfg *config.Unqu
 	}
 }
 
+// checkGenDecl checks general declarations (const, var) for SELECT * in SQL queries
+func checkGenDecl(pass *analysis.Pass, decl *ast.GenDecl, cfg *config.UnqueryvetSettings) {
+	// Only check const and var declarations
+	if decl.Tok != token.CONST && decl.Tok != token.VAR {
+		return
+	}
+
+	// Iterate through all specifications in the declaration
+	for _, spec := range decl.Specs {
+		// Type assert to ValueSpec (const/var specifications)
+		valueSpec, ok := spec.(*ast.ValueSpec)
+		if !ok {
+			continue
+		}
+
+		// Check all values in the specification
+		for _, value := range valueSpec.Values {
+			// Only check direct string literals
+			if lit, ok := value.(*ast.BasicLit); ok && lit.Kind == token.STRING {
+				content := normalizeSQLQuery(lit.Value)
+				if isSelectStarQuery(content, cfg) {
+					pass.Report(analysis.Diagnostic{
+						Pos:     lit.Pos(),
+						Message: getWarningMessage(),
+					})
+				}
+			}
+		}
+	}
+}
+
 // checkCallExpr analyzes function calls for SQL with SELECT * usage
 // Includes checking arguments and SQL builders
 func checkCallExpr(pass *analysis.Pass, call *ast.CallExpr, cfg *config.UnqueryvetSettings) {
-
 	// Check SQL builders for SELECT * in arguments
 	if cfg.CheckSQLBuilders && isSQLBuilderSelectStar(call) {
 		pass.Report(analysis.Diagnostic{
diff --git a/vendor/github.com/catenacyber/perfsprint/analyzer/analyzer.go b/vendor/github.com/catenacyber/perfsprint/analyzer/analyzer.go
index 42eb0259ae..c44104f5f9 100644
--- a/vendor/github.com/catenacyber/perfsprint/analyzer/analyzer.go
+++ b/vendor/github.com/catenacyber/perfsprint/analyzer/analyzer.go
@@ -1,3 +1,4 @@
+// Package analyzer provides a static analysis tool that checks that fmt.Sprintf can be replaced with a faster alternative.
 package analyzer
 
 import (
@@ -118,7 +119,8 @@ func isConcatable(verb string) bool {
 	if strings.Count(verb, "%[1]s") > 1 {
 		return false
 	}
-	return (hasPrefix || hasSuffix) && !(hasPrefix && hasSuffix)
+	// TODO handle case hasPrefix and hasSuffix
+	return (hasPrefix || hasSuffix) && !(hasPrefix && hasSuffix) //nolint:staticcheck
 }
 
 func isStringAdd(st *ast.AssignStmt, idname string) ast.Expr {
@@ -171,6 +173,9 @@ func (n *perfSprint) reportConcatLoop(pass *analysis.Pass, neededPackages map[st
 		if ok {
 			_, ok = adds[id.Name]
 			if ok {
+				if x.Tok == token.ASSIGN && isStringAdd(x, id.Name) == nil {
+					addTODO = id.Name
+				}
 				return false
 			}
 		}
@@ -198,11 +203,15 @@ func (n *perfSprint) reportConcatLoop(pass *analysis.Pass, neededPackages map[st
 	// before the loop: declare the strings Builders
 	// during the loop: replace concatenation with Builder.WriteString
 	// after the loop: use the Builder.String to append to the pre-existing string
+	var prefixSb203 strings.Builder
+	var suffixSb203 strings.Builder
 	for _, k := range keys {
 		// lol
-		prefix += fmt.Sprintf("var %sSb%d strings.Builder\n", k, loopStartLine)
-		suffix += fmt.Sprintf("\n%s += %sSb%d.String()", k, k, loopStartLine)
+		prefixSb203.WriteString(fmt.Sprintf("var %sSb%d strings.Builder\n", k, loopStartLine))
+		suffixSb203.WriteString(fmt.Sprintf("\n%s += %sSb%d.String()", k, k, loopStartLine))
 	}
+	prefix += prefixSb203.String()
+	suffix += suffixSb203.String()
 	te := []analysis.TextEdit{
 		{
 			Pos: node.Pos(),
diff --git a/vendor/github.com/godoc-lint/godoc-lint/pkg/check/require_doc/require_doc.go b/vendor/github.com/godoc-lint/godoc-lint/pkg/check/require_doc/require_doc.go
index 9b0ba1affa..afa16bd783 100644
--- a/vendor/github.com/godoc-lint/godoc-lint/pkg/check/require_doc/require_doc.go
+++ b/vendor/github.com/godoc-lint/godoc-lint/pkg/check/require_doc/require_doc.go
@@ -44,6 +44,13 @@ func (r *RequireDocChecker) Apply(actx *model.AnalysisContext) error {
 				continue
 			}
 
+			if decl.Name == "_" {
+				// Blank identifiers should be ignored; e.g.:
+				//
+				//	var _ = 0
+				continue
+			}
+
 			if decl.Doc != nil && (decl.Doc.DisabledRules.All || decl.Doc.DisabledRules.Rules.Has(requireDocRule)) {
 				continue
 			}
diff --git a/vendor/github.com/godoc-lint/godoc-lint/pkg/check/start_with_name/start_with_name.go b/vendor/github.com/godoc-lint/godoc-lint/pkg/check/start_with_name/start_with_name.go
index af83b99a03..f8c905aab4 100644
--- a/vendor/github.com/godoc-lint/godoc-lint/pkg/check/start_with_name/start_with_name.go
+++ b/vendor/github.com/godoc-lint/godoc-lint/pkg/check/start_with_name/start_with_name.go
@@ -45,6 +45,13 @@ func (r *StartWithNameChecker) Apply(actx *model.AnalysisContext) error {
 			continue
 		}
 
+		if decl.Name == "_" {
+			// Blank identifiers should be ignored; e.g.:
+			//
+			//	var _ = 0
+			continue
+		}
+
 		if decl.Kind == model.SymbolDeclKindBad {
 			continue
 		}
diff --git a/vendor/github.com/godoc-lint/godoc-lint/pkg/config/plain.go b/vendor/github.com/godoc-lint/godoc-lint/pkg/config/plain.go
index 4cb75d8d04..d19323f3ff 100644
--- a/vendor/github.com/godoc-lint/godoc-lint/pkg/config/plain.go
+++ b/vendor/github.com/godoc-lint/godoc-lint/pkg/config/plain.go
@@ -43,7 +43,7 @@ func transferOptions(target *model.RuleOptions, source *PlainRuleOptions) {
 	resVT := resV.Type()
 
 	resOptionMap := make(map[string]string, resVT.NumField())
-	for i := 0; i < resVT.NumField(); i++ {
+	for i := range resVT.NumField() {
 		ft := resVT.Field(i)
 		key, ok := ft.Tag.Lookup("option")
 		if !ok {
@@ -54,7 +54,7 @@ func transferOptions(target *model.RuleOptions, source *PlainRuleOptions) {
 	v := reflect.ValueOf(source).Elem()
 	vt := v.Type()
 
-	for i := 0; i < vt.NumField(); i++ {
+	for i := range vt.NumField() {
 		ft := vt.Field(i)
 		key, ok := ft.Tag.Lookup("option")
 		if !ok {
diff --git a/vendor/github.com/godoc-lint/godoc-lint/pkg/inspect/inspector.go b/vendor/github.com/godoc-lint/godoc-lint/pkg/inspect/inspector.go
index 7ea535ee84..0040be0e02 100644
--- a/vendor/github.com/godoc-lint/godoc-lint/pkg/inspect/inspector.go
+++ b/vendor/github.com/godoc-lint/godoc-lint/pkg/inspect/inspector.go
@@ -296,14 +296,13 @@ func (i *Inspector) extractCommentGroup(cg *ast.CommentGroup) *model.CommentGrou
 func extractDisableDirectivesInComment(s string) model.InspectorResultDisableRules {
 	result := model.InspectorResultDisableRules{}
 	for _, directive := range disableDirectivePattern.FindAllStringSubmatch(s, -1) {
-		args := string(directive[1])
+		args := directive[1]
 		if args == "" {
 			result.All = true
 			continue
 		}
 
-		names := strings.Split(strings.TrimSpace(args), " ")
-		for _, name := range names {
+		for name := range strings.SplitSeq(strings.TrimSpace(args), " ") {
 			if model.AllRules.Has(model.Rule(name)) {
 				result.Rules = result.Rules.Add(model.Rule(name))
 			}
diff --git a/vendor/github.com/golangci/golangci-lint/v2/pkg/commands/config_verify.go b/vendor/github.com/golangci/golangci-lint/v2/pkg/commands/config_verify.go
index ef7a4e094d..1bbc47d8d6 100644
--- a/vendor/github.com/golangci/golangci-lint/v2/pkg/commands/config_verify.go
+++ b/vendor/github.com/golangci/golangci-lint/v2/pkg/commands/config_verify.go
@@ -17,7 +17,7 @@ import (
 	"github.com/santhosh-tekuri/jsonschema/v6"
 	"github.com/spf13/cobra"
 	"github.com/spf13/pflag"
-	"gopkg.in/yaml.v3"
+	"go.yaml.in/yaml/v3"
 
 	"github.com/golangci/golangci-lint/v2/pkg/exitcodes"
 )
diff --git a/vendor/github.com/golangci/golangci-lint/v2/pkg/commands/custom.go b/vendor/github.com/golangci/golangci-lint/v2/pkg/commands/custom.go
index 227df9bee1..e6a7f5ed71 100644
--- a/vendor/github.com/golangci/golangci-lint/v2/pkg/commands/custom.go
+++ b/vendor/github.com/golangci/golangci-lint/v2/pkg/commands/custom.go
@@ -5,6 +5,7 @@ import (
"log" "os" + "github.com/fatih/color" "github.com/spf13/cobra" "github.com/golangci/golangci-lint/v2/pkg/commands/internal" @@ -13,11 +14,19 @@ import ( const envKeepTempFiles = "CUSTOM_GCL_KEEP_TEMP_FILES" +type customOptions struct { + version string + name string + destination string +} + type customCommand struct { cmd *cobra.Command cfg *internal.Configuration + opts customOptions + log logutils.Log } @@ -33,6 +42,13 @@ func newCustomCommand(logger logutils.Log) *customCommand { SilenceUsage: true, } + flagSet := customCmd.PersistentFlags() + flagSet.SortFlags = false // sort them as they are defined here + + flagSet.StringVar(&c.opts.version, "version", "", color.GreenString("The golangci-lint version used to build the custom binary")) + flagSet.StringVar(&c.opts.name, "name", "", color.GreenString("The name of the custom binary")) + flagSet.StringVar(&c.opts.destination, "destination", "", color.GreenString("The directory path used to store the custom binary")) + c.cmd = customCmd return c @@ -44,6 +60,18 @@ func (c *customCommand) preRunE(_ *cobra.Command, _ []string) error { return err } + if c.opts.version != "" { + cfg.Version = c.opts.version + } + + if c.opts.name != "" { + cfg.Name = c.opts.name + } + + if c.opts.destination != "" { + cfg.Destination = c.opts.destination + } + err = cfg.Validate() if err != nil { return err diff --git a/vendor/github.com/golangci/golangci-lint/v2/pkg/commands/internal/builder.go b/vendor/github.com/golangci/golangci-lint/v2/pkg/commands/internal/builder.go index bfd242f159..63f6f2f189 100644 --- a/vendor/github.com/golangci/golangci-lint/v2/pkg/commands/internal/builder.go +++ b/vendor/github.com/golangci/golangci-lint/v2/pkg/commands/internal/builder.go @@ -92,7 +92,7 @@ func (b Builder) clone(ctx context.Context) error { //nolint:gosec // the variable is sanitized. 
 	cmd := exec.CommandContext(ctx,
 		"git", "clone", "--branch", sanitizeVersion(b.cfg.Version),
-		"--single-branch", "--depth", "1", "-c advice.detachedHead=false", "-q",
+		"--single-branch", "--depth", "1", "-c", "advice.detachedHead=false", "-q",
 		"https://github.com/golangci/golangci-lint.git",
 	)
 	cmd.Dir = b.root
diff --git a/vendor/github.com/golangci/golangci-lint/v2/pkg/commands/internal/configuration.go b/vendor/github.com/golangci/golangci-lint/v2/pkg/commands/internal/configuration.go
index f9de4c47a7..0982c3eca4 100644
--- a/vendor/github.com/golangci/golangci-lint/v2/pkg/commands/internal/configuration.go
+++ b/vendor/github.com/golangci/golangci-lint/v2/pkg/commands/internal/configuration.go
@@ -7,7 +7,7 @@ import (
 	"path/filepath"
 	"strings"
 
-	"gopkg.in/yaml.v3"
+	"go.yaml.in/yaml/v3"
 )
 
 const base = ".custom-gcl"
diff --git a/vendor/github.com/golangci/golangci-lint/v2/pkg/commands/internal/migrate/parser/parser.go b/vendor/github.com/golangci/golangci-lint/v2/pkg/commands/internal/migrate/parser/parser.go
index ea00b41f5a..293eaf18a5 100644
--- a/vendor/github.com/golangci/golangci-lint/v2/pkg/commands/internal/migrate/parser/parser.go
+++ b/vendor/github.com/golangci/golangci-lint/v2/pkg/commands/internal/migrate/parser/parser.go
@@ -10,7 +10,7 @@ import (
 	"strings"
 
 	"github.com/pelletier/go-toml/v2"
-	"gopkg.in/yaml.v3"
+	"go.yaml.in/yaml/v3"
 )
 
 type File interface {
diff --git a/vendor/github.com/golangci/golangci-lint/v2/pkg/commands/internal/migrate/versionone/linters_settings.go b/vendor/github.com/golangci/golangci-lint/v2/pkg/commands/internal/migrate/versionone/linters_settings.go
index 44583b7d34..3c641541c3 100644
--- a/vendor/github.com/golangci/golangci-lint/v2/pkg/commands/internal/migrate/versionone/linters_settings.go
+++ b/vendor/github.com/golangci/golangci-lint/v2/pkg/commands/internal/migrate/versionone/linters_settings.go
@@ -3,7 +3,7 @@ package versionone
 import (
 	"encoding"
 
-	"gopkg.in/yaml.v3"
+	"go.yaml.in/yaml/v3"
 
 	"github.com/golangci/golangci-lint/v2/pkg/commands/internal/migrate/ptr"
 )
diff --git a/vendor/github.com/golangci/golangci-lint/v2/pkg/commands/run.go b/vendor/github.com/golangci/golangci-lint/v2/pkg/commands/run.go
index d1c96b88ee..93efa6d9cd 100644
--- a/vendor/github.com/golangci/golangci-lint/v2/pkg/commands/run.go
+++ b/vendor/github.com/golangci/golangci-lint/v2/pkg/commands/run.go
@@ -26,8 +26,8 @@ import (
 	"github.com/spf13/pflag"
 	"github.com/spf13/viper"
 	"go.uber.org/automaxprocs/maxprocs"
+	"go.yaml.in/yaml/v3"
 	"golang.org/x/mod/sumdb/dirhash"
-	"gopkg.in/yaml.v3"
 
 	"github.com/golangci/golangci-lint/v2/internal/cache"
 	"github.com/golangci/golangci-lint/v2/pkg/config"
diff --git a/vendor/github.com/golangci/golangci-lint/v2/pkg/config/linters_settings.go b/vendor/github.com/golangci/golangci-lint/v2/pkg/config/linters_settings.go
index ba60dd2c63..fefa94ca39 100644
--- a/vendor/github.com/golangci/golangci-lint/v2/pkg/config/linters_settings.go
+++ b/vendor/github.com/golangci/golangci-lint/v2/pkg/config/linters_settings.go
@@ -834,15 +834,16 @@ type RecvcheckSettings struct {
 }
 
 type ReviveSettings struct {
-	Go             string            `mapstructure:"-"`
-	MaxOpenFiles   int               `mapstructure:"max-open-files"`
-	Confidence     float64           `mapstructure:"confidence"`
-	Severity       string            `mapstructure:"severity"`
-	EnableAllRules bool              `mapstructure:"enable-all-rules"`
-	Rules          []ReviveRule      `mapstructure:"rules"`
-	ErrorCode      int               `mapstructure:"error-code"`
-	WarningCode    int               `mapstructure:"warning-code"`
-	Directives     []ReviveDirective `mapstructure:"directives"`
+	Go                 string            `mapstructure:"-"`
+	MaxOpenFiles       int               `mapstructure:"max-open-files"`
+	Confidence         float64           `mapstructure:"confidence"`
+	Severity           string            `mapstructure:"severity"`
+	EnableAllRules     bool              `mapstructure:"enable-all-rules"`
+	EnableDefaultRules bool              `mapstructure:"enable-default-rules"`
+	Rules              []ReviveRule      `mapstructure:"rules"`
+	ErrorCode          int               `mapstructure:"error-code"`
+	WarningCode        int               `mapstructure:"warning-code"`
+	Directives         []ReviveDirective `mapstructure:"directives"`
 }
 
 type ReviveRule struct {
diff --git a/vendor/github.com/golangci/golangci-lint/v2/pkg/goformatters/gci/internal/config/config.go b/vendor/github.com/golangci/golangci-lint/v2/pkg/goformatters/gci/internal/config/config.go
index c859b442fc..13ca6dd86f 100644
--- a/vendor/github.com/golangci/golangci-lint/v2/pkg/goformatters/gci/internal/config/config.go
+++ b/vendor/github.com/golangci/golangci-lint/v2/pkg/goformatters/gci/internal/config/config.go
@@ -4,7 +4,7 @@ import (
 	"sort"
 	"strings"
 
-	"gopkg.in/yaml.v3"
+	"go.yaml.in/yaml/v3"
 
 	"github.com/daixiang0/gci/pkg/config"
 	"github.com/daixiang0/gci/pkg/section"
diff --git a/vendor/github.com/golangci/golangci-lint/v2/pkg/golinters/contextcheck/contextcheck.go b/vendor/github.com/golangci/golangci-lint/v2/pkg/golinters/contextcheck/contextcheck.go
index 88c71d2d3e..b01df7d989 100644
--- a/vendor/github.com/golangci/golangci-lint/v2/pkg/golinters/contextcheck/contextcheck.go
+++ b/vendor/github.com/golangci/golangci-lint/v2/pkg/golinters/contextcheck/contextcheck.go
@@ -2,6 +2,8 @@ package contextcheck
 
 import (
 	"github.com/kkHAIKE/contextcheck"
+	"golang.org/x/tools/go/analysis/passes/ctrlflow"
+	"golang.org/x/tools/go/analysis/passes/inspect"
 
 	"github.com/golangci/golangci-lint/v2/pkg/goanalysis"
 	"github.com/golangci/golangci-lint/v2/pkg/lint/linter"
@@ -9,6 +11,11 @@ import (
 
 func New() *goanalysis.Linter {
 	analyzer := contextcheck.NewAnalyzer(contextcheck.Configuration{})
+	// TODO(ldez) there is a problem with this linter:
+	// I think the problem related to facts.
+	// The BuildSSA pass has been changed inside (0.39.0):
+	// https://github.com/golang/tools/commit/b74c09864920a69a4d2f6ef0ecb4f9cff226893a
+	analyzer.Requires = append(analyzer.Requires, ctrlflow.Analyzer, inspect.Analyzer)
 
 	return goanalysis.
 		NewLinterFromAnalyzer(analyzer).
diff --git a/vendor/github.com/golangci/golangci-lint/v2/pkg/golinters/forbidigo/forbidigo.go b/vendor/github.com/golangci/golangci-lint/v2/pkg/golinters/forbidigo/forbidigo.go
index 796faf3a6e..1473b4d6e3 100644
--- a/vendor/github.com/golangci/golangci-lint/v2/pkg/golinters/forbidigo/forbidigo.go
+++ b/vendor/github.com/golangci/golangci-lint/v2/pkg/golinters/forbidigo/forbidigo.go
@@ -4,8 +4,8 @@ import (
 	"fmt"
 
 	"github.com/ashanbrown/forbidigo/v2/forbidigo"
+	"go.yaml.in/yaml/v3"
 	"golang.org/x/tools/go/analysis"
-	"gopkg.in/yaml.v3"
 
 	"github.com/golangci/golangci-lint/v2/pkg/config"
 	"github.com/golangci/golangci-lint/v2/pkg/goanalysis"
diff --git a/vendor/github.com/golangci/golangci-lint/v2/pkg/golinters/modernize/modernize.go b/vendor/github.com/golangci/golangci-lint/v2/pkg/golinters/modernize/modernize.go
index 97825c07e0..08cccdeb8a 100644
--- a/vendor/github.com/golangci/golangci-lint/v2/pkg/golinters/modernize/modernize.go
+++ b/vendor/github.com/golangci/golangci-lint/v2/pkg/golinters/modernize/modernize.go
@@ -14,9 +14,9 @@ func New(settings *config.ModernizeSettings) *goanalysis.Linter {
 	var analyzers []*analysis.Analyzer
 
 	if settings == nil {
-		analyzers = modernize.Suite
+		analyzers = cleanSuite()
 	} else {
-		for _, analyzer := range modernize.Suite {
+		for _, analyzer := range cleanSuite() {
 			if slices.Contains(settings.Disable, analyzer.Name) {
 				continue
 			}
@@ -32,3 +32,19 @@ func New(settings *config.ModernizeSettings) *goanalysis.Linter {
 		nil).
 		WithLoadMode(goanalysis.LoadModeTypesInfo)
 }
+
+func cleanSuite() []*analysis.Analyzer {
+	var analyzers []*analysis.Analyzer
+
+	for _, analyzer := range modernize.Suite {
+		// Disabled because of false positives
+		// https://github.com/golang/go/issues/76687
+		if analyzer.Name == "stringscut" {
+			continue
+		}
+
+		analyzers = append(analyzers, analyzer)
+	}
+
+	return analyzers
+}
diff --git a/vendor/github.com/golangci/golangci-lint/v2/pkg/golinters/revive/revive.go b/vendor/github.com/golangci/golangci-lint/v2/pkg/golinters/revive/revive.go
index 8e5a7835de..6799e1a428 100644
--- a/vendor/github.com/golangci/golangci-lint/v2/pkg/golinters/revive/revive.go
+++ b/vendor/github.com/golangci/golangci-lint/v2/pkg/golinters/revive/revive.go
@@ -169,8 +169,8 @@ func (w *wrapper) toIssue(pass *analysis.Pass, failure *lint.Failure) *goanalysi
 // This function mimics the GetConfig function of revive.
 // This allows to get default values and right types.
 // https://github.com/golangci/golangci-lint/issues/1745
-// https://github.com/mgechev/revive/blob/v1.6.0/config/config.go#L230
-// https://github.com/mgechev/revive/blob/v1.6.0/config/config.go#L182-L188
+// https://github.com/mgechev/revive/blob/v1.13.0/config/config.go#L249
+// https://github.com/mgechev/revive/blob/v1.13.0/config/config.go#L198-L204
 func getConfig(cfg *config.ReviveSettings) (*lint.Config, error) {
 	conf := defaultConfig()
 
@@ -269,7 +269,7 @@ func safeTomlSlice(r []any) []any {
 }
 
 // This element is not exported by revive, so we need copy the code.
-// Extracted from https://github.com/mgechev/revive/blob/v1.12.0/config/config.go#L16
+// Extracted from https://github.com/mgechev/revive/blob/v1.13.0/config/config.go#L16
 var defaultRules = []lint.Rule{
 	&rule.VarDeclarationsRule{},
 	&rule.PackageCommentsRule{},
@@ -325,6 +325,7 @@ var allRules = append([]lint.Rule{
 	&rule.FileLengthLimitRule{},
 	&rule.FilenameFormatRule{},
 	&rule.FlagParamRule{},
+	&rule.ForbiddenCallInWgGoRule{},
 	&rule.FunctionLength{},
 	&rule.FunctionResultsLimitRule{},
 	&rule.GetReturnRule{},
@@ -337,6 +338,7 @@ var allRules = append([]lint.Rule{
 	&rule.ImportAliasNamingRule{},
 	&rule.ImportsBlocklistRule{},
 	&rule.ImportShadowingRule{},
+	&rule.InefficientMapLookupRule{},
 	&rule.LineLengthLimitRule{},
 	&rule.MaxControlNestingRule{},
 	&rule.MaxPublicStructsRule{},
@@ -360,6 +362,7 @@ var allRules = append([]lint.Rule{
 	&rule.UnexportedNamingRule{},
 	&rule.UnhandledErrorRule{},
 	&rule.UnnecessaryFormatRule{},
+	&rule.UnnecessaryIfRule{},
 	&rule.UnnecessaryStmtRule{},
 	&rule.UnsecureURLSchemeRule{},
 	&rule.UnusedReceiverRule{},
@@ -375,7 +378,7 @@ var allRules = append([]lint.Rule{
 const defaultConfidence = 0.8
 
 // This element is not exported by revive, so we need copy the code.
-// Extracted from https://github.com/mgechev/revive/blob/v1.12.0/config/config.go#L206
+// Extracted from https://github.com/mgechev/revive/blob/v1.13.0/config/config.go#L209
 func normalizeConfig(cfg *lint.Config) {
 	// NOTE(ldez): this custom section for golangci-lint should be kept.
 	// ---
@@ -386,19 +389,22 @@ func normalizeConfig(cfg *lint.Config) {
 	if len(cfg.Rules) == 0 {
 		cfg.Rules = map[string]lint.RuleConfig{}
 	}
-	if cfg.EnableAllRules {
-		// Add to the configuration all rules not yet present in it
-		for _, r := range allRules {
+
+	addRules := func(config *lint.Config, rules []lint.Rule) {
+		for _, r := range rules {
 			ruleName := r.Name()
-			_, alreadyInConf := cfg.Rules[ruleName]
-			if alreadyInConf {
-				continue
+			if _, ok := config.Rules[ruleName]; !ok {
+				config.Rules[ruleName] = lint.RuleConfig{}
 			}
-			// Add the rule with an empty conf for
-			cfg.Rules[ruleName] = lint.RuleConfig{}
 		}
 	}
 
+	if cfg.EnableAllRules {
+		addRules(cfg, allRules)
+	} else if cfg.EnableDefaultRules {
+		addRules(cfg, defaultRules)
+	}
+
 	severity := cfg.Severity
 	if severity != "" {
 		for k, v := range cfg.Rules {
@@ -417,7 +423,7 @@ func normalizeConfig(cfg *lint.Config) {
 }
 
 // This element is not exported by revive, so we need copy the code.
-// Extracted from https://github.com/mgechev/revive/blob/v1.12.0/config/config.go#L274
+// Extracted from https://github.com/mgechev/revive/blob/v1.13.0/config/config.go#L280
 func defaultConfig() *lint.Config {
 	defaultConfig := lint.Config{
 		Confidence: defaultConfidence,
@@ -463,7 +469,7 @@ func extractRulesName(rules []lint.Rule) []string {
 	return names
 }
 
-// Extracted from https://github.com/mgechev/revive/blob/v1.12.0/formatter/severity.go
+// Extracted from https://github.com/mgechev/revive/blob/v1.13.0/formatter/severity.go
 // Modified to use pointers (related to hugeParam rule).
 func severity(cfg *lint.Config, failure *lint.Failure) lint.Severity {
 	if cfg, ok := cfg.Rules[failure.RuleName]; ok && cfg.Severity == lint.SeverityError {
diff --git a/vendor/github.com/golangci/golangci-lint/v2/pkg/lint/linter/config.go b/vendor/github.com/golangci/golangci-lint/v2/pkg/lint/linter/config.go
index 0287dece95..a5b98413d4 100644
--- a/vendor/github.com/golangci/golangci-lint/v2/pkg/lint/linter/config.go
+++ b/vendor/github.com/golangci/golangci-lint/v2/pkg/lint/linter/config.go
@@ -4,8 +4,8 @@ import (
 	"bytes"
 	"fmt"
 
+	"go.yaml.in/yaml/v3"
 	"golang.org/x/tools/go/packages"
-	"gopkg.in/yaml.v3"
 
 	"github.com/golangci/golangci-lint/v2/pkg/config"
 )
diff --git a/vendor/github.com/hashicorp/go-version/LICENSE b/vendor/github.com/hashicorp/go-version/LICENSE
index 1409d6ab92..bb1e9a486a 100644
--- a/vendor/github.com/hashicorp/go-version/LICENSE
+++ b/vendor/github.com/hashicorp/go-version/LICENSE
@@ -1,4 +1,4 @@
-Copyright (c) 2014 HashiCorp, Inc.
+Copyright IBM Corp. 2014, 2025
 
 Mozilla Public License, version 2.0
 
diff --git a/vendor/github.com/hashicorp/go-version/README.md b/vendor/github.com/hashicorp/go-version/README.md
index 4b7806cd96..83a8249f72 100644
--- a/vendor/github.com/hashicorp/go-version/README.md
+++ b/vendor/github.com/hashicorp/go-version/README.md
@@ -1,6 +1,7 @@
 # Versioning Library for Go
+
 ![Build Status](https://github.com/hashicorp/go-version/actions/workflows/go-tests.yml/badge.svg)
-[![GoDoc](https://godoc.org/github.com/hashicorp/go-version?status.svg)](https://godoc.org/github.com/hashicorp/go-version)
+[![Go Reference](https://pkg.go.dev/badge/github.com/hashicorp/go-version.svg)](https://pkg.go.dev/github.com/hashicorp/go-version)
 
 go-version is a library for parsing versions and version constraints,
 and verifying versions against a set of constraints. go-version
@@ -12,7 +13,7 @@ Versions used with go-version must follow [SemVer](http://semver.org/).
 ## Installation and Usage
 
 Package documentation can be found on
-[GoDoc](http://godoc.org/github.com/hashicorp/go-version).
+[Go Reference](https://pkg.go.dev/github.com/hashicorp/go-version).
 
 Installation can be done with a normal `go get`:
 
diff --git a/vendor/github.com/hashicorp/go-version/constraint.go b/vendor/github.com/hashicorp/go-version/constraint.go
index 29bdc4d2b5..3964da070d 100644
--- a/vendor/github.com/hashicorp/go-version/constraint.go
+++ b/vendor/github.com/hashicorp/go-version/constraint.go
@@ -1,4 +1,4 @@
-// Copyright (c) HashiCorp, Inc.
+// Copyright IBM Corp. 2014, 2025
 // SPDX-License-Identifier: MPL-2.0
 
 package version
@@ -8,8 +8,26 @@ import (
 	"regexp"
 	"sort"
 	"strings"
+	"sync"
 )
 
+var (
+	constraintRegexp     *regexp.Regexp
+	constraintRegexpOnce sync.Once
+)
+
+func getConstraintRegexp() *regexp.Regexp {
+	constraintRegexpOnce.Do(func() {
+		// This heavy lifting only happens the first time this function is called
+		constraintRegexp = regexp.MustCompile(fmt.Sprintf(
+			`^\s*(%s)\s*(%s)\s*$`,
+			`<=|>=|!=|~>|<|>|=|`,
+			VersionRegexpRaw,
+		))
+	})
+	return constraintRegexp
+}
+
 // Constraint represents a single constraint for a version, such as
 // ">= 1.0".
 type Constraint struct {
@@ -29,38 +47,11 @@ type Constraints []*Constraint
 
 type constraintFunc func(v, c *Version) bool
 
-var constraintOperators map[string]constraintOperation
-
 type constraintOperation struct {
 	op operator
 	f  constraintFunc
 }
 
-var constraintRegexp *regexp.Regexp
-
-func init() {
-	constraintOperators = map[string]constraintOperation{
-		"":   {op: equal, f: constraintEqual},
-		"=":  {op: equal, f: constraintEqual},
-		"!=": {op: notEqual, f: constraintNotEqual},
-		">":  {op: greaterThan, f: constraintGreaterThan},
-		"<":  {op: lessThan, f: constraintLessThan},
-		">=": {op: greaterThanEqual, f: constraintGreaterThanEqual},
-		"<=": {op: lessThanEqual, f: constraintLessThanEqual},
-		"~>": {op: pessimistic, f: constraintPessimistic},
-	}
-
-	ops := make([]string, 0, len(constraintOperators))
-	for k := range constraintOperators {
-		ops = append(ops, regexp.QuoteMeta(k))
-	}
-
-	constraintRegexp = regexp.MustCompile(fmt.Sprintf(
-		`^\s*(%s)\s*(%s)\s*$`,
-		strings.Join(ops, "|"),
-		VersionRegexpRaw))
-}
-
 // NewConstraint will parse one or more constraints from the given
 // constraint string. The string must be a comma-separated list of
 // constraints.
@@ -107,7 +98,7 @@ func (cs Constraints) Check(v *Version) bool {
 // to '>0.2' it is *NOT* treated as equal.
 //
 // Missing operator is treated as equal to '=', whitespaces
-// are ignored and constraints are sorted before comaparison.
+// are ignored and constraints are sorted before comparison.
 func (cs Constraints) Equals(c Constraints) bool {
 	if len(cs) != len(c) {
 		return false
@@ -176,9 +167,9 @@ func (c *Constraint) String() string {
 }
 
 func parseSingle(v string) (*Constraint, error) {
-	matches := constraintRegexp.FindStringSubmatch(v)
+	matches := getConstraintRegexp().FindStringSubmatch(v)
 	if matches == nil {
-		return nil, fmt.Errorf("Malformed constraint: %s", v)
+		return nil, fmt.Errorf("malformed constraint: %s", v)
 	}
 
 	check, err := NewVersion(matches[2])
@@ -186,7 +177,25 @@ func parseSingle(v string) (*Constraint, error) {
 		return nil, err
 	}
 
-	cop := constraintOperators[matches[1]]
+	var cop constraintOperation
+	switch matches[1] {
+	case "=":
+		cop = constraintOperation{op: equal, f: constraintEqual}
+	case "!=":
+		cop = constraintOperation{op: notEqual, f: constraintNotEqual}
+	case ">":
+		cop = constraintOperation{op: greaterThan, f: constraintGreaterThan}
+	case "<":
+		cop = constraintOperation{op: lessThan, f: constraintLessThan}
+	case ">=":
+		cop = constraintOperation{op: greaterThanEqual, f: constraintGreaterThanEqual}
+	case "<=":
+		cop = constraintOperation{op: lessThanEqual, f: constraintLessThanEqual}
+	case "~>":
+		cop = constraintOperation{op: pessimistic, f: constraintPessimistic}
+	default:
+		cop = constraintOperation{op: equal, f: constraintEqual}
+	}
 
 	return &Constraint{
 		f:     cop.f,
diff --git a/vendor/github.com/hashicorp/go-version/version.go b/vendor/github.com/hashicorp/go-version/version.go
index 7c683c2813..17b29732ee 100644
--- a/vendor/github.com/hashicorp/go-version/version.go
+++ b/vendor/github.com/hashicorp/go-version/version.go
@@ -1,23 +1,39 @@
-// Copyright (c) HashiCorp, Inc.
+// Copyright IBM Corp. 2014, 2025
 // SPDX-License-Identifier: MPL-2.0
 
 package version
 
 import (
-	"bytes"
 	"database/sql/driver"
 	"fmt"
 	"regexp"
 	"strconv"
 	"strings"
+	"sync"
 )
 
 // The compiled regular expression used to test the validity of a version.
 var (
-	versionRegexp *regexp.Regexp
-	semverRegexp  *regexp.Regexp
+	versionRegexp     *regexp.Regexp
+	versionRegexpOnce sync.Once
+	semverRegexp      *regexp.Regexp
+	semverRegexpOnce  sync.Once
 )
 
+func getVersionRegexp() *regexp.Regexp {
+	versionRegexpOnce.Do(func() {
+		versionRegexp = regexp.MustCompile("^" + VersionRegexpRaw + "$")
+	})
+	return versionRegexp
+}
+
+func getSemverRegexp() *regexp.Regexp {
+	semverRegexpOnce.Do(func() {
+		semverRegexp = regexp.MustCompile("^" + SemverRegexpRaw + "$")
+	})
+	return semverRegexp
+}
+
 // The raw regular expression string used for testing the validity
 // of a version.
 const (
@@ -42,28 +58,23 @@ type Version struct {
 	original string
 }
 
-func init() {
-	versionRegexp = regexp.MustCompile("^" + VersionRegexpRaw + "$")
-	semverRegexp = regexp.MustCompile("^" + SemverRegexpRaw + "$")
-}
-
 // NewVersion parses the given version and returns a new
 // Version.
 func NewVersion(v string) (*Version, error) {
-	return newVersion(v, versionRegexp)
+	return newVersion(v, getVersionRegexp())
 }
 
 // NewSemver parses the given version and returns a new
 // Version that adheres strictly to SemVer specs
 // https://semver.org/
 func NewSemver(v string) (*Version, error) {
-	return newVersion(v, semverRegexp)
+	return newVersion(v, getSemverRegexp())
 }
 
 func newVersion(v string, pattern *regexp.Regexp) (*Version, error) {
 	matches := pattern.FindStringSubmatch(v)
 	if matches == nil {
-		return nil, fmt.Errorf("Malformed version: %s", v)
+		return nil, fmt.Errorf("malformed version: %s", v)
 	}
 	segmentsStr := strings.Split(matches[1], ".")
 	segments := make([]int64, len(segmentsStr))
@@ -71,7 +82,7 @@ func newVersion(v string, pattern *regexp.Regexp) (*Version, error) {
 		val, err := strconv.ParseInt(str, 10, 64)
 		if err != nil {
 			return nil, fmt.Errorf(
-				"Error parsing version: %s", err)
+				"error parsing version: %s", err)
 		}
 
 		segments[i] = val
@@ -174,7 +185,7 @@ func (v *Version) Compare(other *Version) int {
 		} else if lhs < rhs {
 			return -1
 		}
-		// Otherwis, rhs was > lhs, they're not equal
+		// Otherwise, rhs was > lhs, they're not equal
 		return 1
 	}
 
@@ -382,22 +393,29 @@ func (v *Version) Segments64() []int64 {
 // missing parts (1.0 => 1.0.0) will be made into a canonicalized form
 // as shown in the parenthesized examples.
 func (v *Version) String() string {
-	var buf bytes.Buffer
-	fmtParts := make([]string, len(v.segments))
+	return string(v.bytes())
+}
+
+func (v *Version) bytes() []byte {
+	var buf []byte
+
 	for i, s := range v.segments {
-		// We can ignore err here since we've pre-parsed the values in segments
-		str := strconv.FormatInt(s, 10)
-		fmtParts[i] = str
+		if i > 0 {
+			buf = append(buf, '.')
+		}
+		buf = strconv.AppendInt(buf, s, 10)
 	}
-	fmt.Fprintf(&buf, strings.Join(fmtParts, "."))
+
 	if v.pre != "" {
-		fmt.Fprintf(&buf, "-%s", v.pre)
+		buf = append(buf, '-')
+		buf = append(buf, v.pre...)
 	}
+
 	if v.metadata != "" {
-		fmt.Fprintf(&buf, "+%s", v.metadata)
+		buf = append(buf, '+')
+		buf = append(buf, v.metadata...)
 	}
-	return buf.String()
+
+	return buf
 }
 
 // Original returns the original parsed version as-is, including any
diff --git a/vendor/github.com/hashicorp/go-version/version_collection.go b/vendor/github.com/hashicorp/go-version/version_collection.go
index 83547fe13d..11bc8b1c56 100644
--- a/vendor/github.com/hashicorp/go-version/version_collection.go
+++ b/vendor/github.com/hashicorp/go-version/version_collection.go
@@ -1,4 +1,4 @@
-// Copyright (c) HashiCorp, Inc.
+// Copyright IBM Corp. 2014, 2025
 // SPDX-License-Identifier: MPL-2.0
 
 package version
diff --git a/vendor/github.com/mgechev/revive/config/config.go b/vendor/github.com/mgechev/revive/config/config.go
index e03d337ff2..2246444795 100644
--- a/vendor/github.com/mgechev/revive/config/config.go
+++ b/vendor/github.com/mgechev/revive/config/config.go
@@ -113,6 +113,9 @@ var allRules = append([]lint.Rule{
 	&rule.PackageDirectoryMismatchRule{},
 	&rule.UseWaitGroupGoRule{},
 	&rule.UnsecureURLSchemeRule{},
+	&rule.InefficientMapLookupRule{},
+	&rule.ForbiddenCallInWgGoRule{},
+	&rule.UnnecessaryIfRule{},
 }, defaultRules...)
 
 // allFormatters is a list of all available formatters to output the linting results.
@@ -207,19 +210,22 @@ func normalizeConfig(config *lint.Config) {
 	if len(config.Rules) == 0 {
 		config.Rules = map[string]lint.RuleConfig{}
 	}
-	if config.EnableAllRules {
-		// Add to the configuration all rules not yet present in it
-		for _, r := range allRules {
+
+	addRules := func(config *lint.Config, rules []lint.Rule) {
+		for _, r := range rules {
 			ruleName := r.Name()
-			_, alreadyInConf := config.Rules[ruleName]
-			if alreadyInConf {
-				continue
+			if _, ok := config.Rules[ruleName]; !ok {
+				config.Rules[ruleName] = lint.RuleConfig{}
 			}
-			// Add the rule with an empty conf for
-			config.Rules[ruleName] = lint.RuleConfig{}
 		}
 	}
 
+	if config.EnableAllRules {
+		addRules(config, allRules)
+	} else if config.EnableDefaultRules {
+		addRules(config, defaultRules)
+	}
+
 	severity := config.Severity
 	if severity != "" {
 		for k, v := range config.Rules {
diff --git a/vendor/github.com/mgechev/revive/lint/config.go b/vendor/github.com/mgechev/revive/lint/config.go
index ae9a20a7cf..3047fd29de 100644
--- a/vendor/github.com/mgechev/revive/lint/config.go
+++ b/vendor/github.com/mgechev/revive/lint/config.go
@@ -60,6 +60,7 @@ type Config struct {
 	Confidence     float64  `toml:"confidence"`
 	Severity       Severity `toml:"severity"`
 	EnableAllRules bool     `toml:"enableAllRules"`
+	EnableDefaultRules bool `toml:"enableDefaultRules"`
 	Rules       RulesConfig `toml:"rule"`
 	ErrorCode   int         `toml:"errorCode"`
 	WarningCode int         `toml:"warningCode"`
diff --git a/vendor/github.com/mgechev/revive/lint/failure.go b/vendor/github.com/mgechev/revive/lint/failure.go
index d9ff93e823..c25df4836b 100644
--- a/vendor/github.com/mgechev/revive/lint/failure.go
+++ b/vendor/github.com/mgechev/revive/lint/failure.go
@@ -11,6 +11,8 @@ const (
 	// FailureCategoryBadPractice indicates bad practice issues.
 	FailureCategoryBadPractice FailureCategory = "bad practice"
 	// FailureCategoryCodeStyle indicates code style issues.
+	//
+	// Deprecated: use FailureCategoryStyle instead.
 	FailureCategoryCodeStyle FailureCategory = "code-style"
 	// FailureCategoryComments indicates comment issues.
FailureCategoryComments FailureCategory = "comments" diff --git a/vendor/github.com/mgechev/revive/rule/confusing_naming.go b/vendor/github.com/mgechev/revive/rule/confusing_naming.go index 774eb04ee1..83f53a5965 100644 --- a/vendor/github.com/mgechev/revive/rule/confusing_naming.go +++ b/vendor/github.com/mgechev/revive/rule/confusing_naming.go @@ -167,6 +167,11 @@ func checkStructFields(fields *ast.FieldList, structName string, w *lintConfusin bl := make(map[string]bool, len(fields.List)) for _, f := range fields.List { for _, id := range f.Names { + // Skip blank identifiers + if id.Name == "_" { + continue + } + normName := strings.ToUpper(id.Name) if bl[normName] { w.onFailure(lint.Failure{ diff --git a/vendor/github.com/mgechev/revive/rule/deep_exit.go b/vendor/github.com/mgechev/revive/rule/deep_exit.go index 6f7acd305f..ed3e34b53a 100644 --- a/vendor/github.com/mgechev/revive/rule/deep_exit.go +++ b/vendor/github.com/mgechev/revive/rule/deep_exit.go @@ -7,6 +7,7 @@ import ( "unicode" "unicode/utf8" + "github.com/mgechev/revive/internal/astutils" "github.com/mgechev/revive/lint" ) @@ -64,12 +65,18 @@ func (w *lintDeepExit) Visit(node ast.Node) ast.Visitor { pkg := id.Name fn := fc.Sel.Name - if isCallToExitFunction(pkg, fn) { + if isCallToExitFunction(pkg, fn, ce.Args) { + msg := fmt.Sprintf("calls to %s.%s only in main() or init() functions", pkg, fn) + + if pkg == "flag" && fn == "NewFlagSet" && + len(ce.Args) == 2 && astutils.IsPkgDotName(ce.Args[1], "flag", "ExitOnError") { + msg = "calls to flag.NewFlagSet with flag.ExitOnError only in main() or init() functions" + } w.onFailure(lint.Failure{ Confidence: 1, Node: ce, Category: lint.FailureCategoryBadPractice, - Failure: fmt.Sprintf("calls to %s.%s only in main() or init() functions", pkg, fn), + Failure: msg, }) } diff --git a/vendor/github.com/mgechev/revive/rule/defer.go b/vendor/github.com/mgechev/revive/rule/defer.go index 9cab004aee..a85336846c 100644 --- a/vendor/github.com/mgechev/revive/rule/defer.go +++ b/vendor/github.com/mgechev/revive/rule/defer.go @@ -87,7 +87,7 @@ type lintDeferRule struct { onFailure func(lint.Failure) inALoop bool inADefer bool - inAFuncLit bool + inAFuncLit byte // 0 = not in func lit, 1 = in top-level func lit, >1 = nested func lit allow map[string]bool } @@ -100,10 +100,10 @@ func (w lintDeferRule) Visit(node ast.Node) ast.Visitor { w.visitSubtree(n.Body, w.inADefer, true, w.inAFuncLit) return nil case *ast.FuncLit: - w.visitSubtree(n.Body, w.inADefer, false, true) + w.visitSubtree(n.Body, w.inADefer, false, w.inAFuncLit+1) return nil case *ast.ReturnStmt: - if len(n.Results) != 0 && w.inADefer && w.inAFuncLit { + if len(n.Results) != 0 && w.inADefer && w.inAFuncLit == 1 { w.newFailure("return in a defer function has no effect", n, 1.0, lint.FailureCategoryLogic, deferOptionReturn) } case *ast.CallExpr: @@ -114,7 +114,7 @@ func (w lintDeferRule) Visit(node ast.Node) ast.Visitor { // // confidence is not 1 because recover can be in a function that is deferred elsewhere w.newFailure("recover must be called inside a deferred function", n, 0.8, lint.FailureCategoryLogic, deferOptionRecover) - case w.inADefer && !w.inAFuncLit && isCallToRecover: + case w.inADefer && w.inAFuncLit == 0 && isCallToRecover: // defer helper(recover()) // // confidence is not truly 1 because this could be in a correctly-deferred func, @@ -130,13 +130,13 @@ func (w lintDeferRule) Visit(node ast.Node) ast.Visitor { // but normally this doesn't suppress a panic, and even if it did it would silently discard the value. 
w.newFailure("recover must be called inside a deferred function, this is executing recover immediately", n, 1, lint.FailureCategoryLogic, deferOptionImmediateRecover) } - w.visitSubtree(n.Call.Fun, true, false, false) + w.visitSubtree(n.Call.Fun, true, false, 0) for _, a := range n.Call.Args { switch a.(type) { case *ast.FuncLit: continue // too hard to analyze deferred calls with func literals args default: - w.visitSubtree(a, true, false, false) // check arguments, they should not contain recover() + w.visitSubtree(a, true, false, 0) // check arguments, they should not contain recover() } } @@ -162,7 +162,7 @@ func (w lintDeferRule) Visit(node ast.Node) ast.Visitor { return w } -func (w lintDeferRule) visitSubtree(n ast.Node, inADefer, inALoop, inAFuncLit bool) { +func (w lintDeferRule) visitSubtree(n ast.Node, inADefer, inALoop bool, inAFuncLit byte) { nw := lintDeferRule{ onFailure: w.onFailure, inADefer: inADefer, diff --git a/vendor/github.com/mgechev/revive/rule/file_length_limit.go b/vendor/github.com/mgechev/revive/rule/file_length_limit.go index a9b4e8da93..c24db43535 100644 --- a/vendor/github.com/mgechev/revive/rule/file_length_limit.go +++ b/vendor/github.com/mgechev/revive/rule/file_length_limit.go @@ -57,7 +57,7 @@ func (r *FileLengthLimitRule) Apply(file *lint.File, _ lint.Arguments) []lint.Fa return []lint.Failure{ { - Category: lint.FailureCategoryCodeStyle, + Category: lint.FailureCategoryStyle, Confidence: 1, Position: lint.FailurePosition{ Start: token.Position{ diff --git a/vendor/github.com/mgechev/revive/rule/forbidden_call_in_wg_go.go b/vendor/github.com/mgechev/revive/rule/forbidden_call_in_wg_go.go new file mode 100644 index 0000000000..63088e554e --- /dev/null +++ b/vendor/github.com/mgechev/revive/rule/forbidden_call_in_wg_go.go @@ -0,0 +1,113 @@ +package rule + +import ( + "fmt" + "go/ast" + "strings" + + "github.com/mgechev/revive/internal/astutils" + "github.com/mgechev/revive/lint" +) + +// ForbiddenCallInWgGoRule spots calls to panic or wg.Done when using WaitGroup.Go. +type ForbiddenCallInWgGoRule struct{} + +// Apply applies the rule to given file. +func (*ForbiddenCallInWgGoRule) Apply(file *lint.File, _ lint.Arguments) []lint.Failure { + if !file.Pkg.IsAtLeastGoVersion(lint.Go125) { + return nil // skip analysis if Go version < 1.25 + } + + var failures []lint.Failure + + onFailure := func(failure lint.Failure) { + failures = append(failures, failure) + } + + w := &lintForbiddenCallInWgGo{ + onFailure: onFailure, + } + + // Iterate over declarations looking for function declarations + for _, decl := range file.AST.Decls { + fn, ok := decl.(*ast.FuncDecl) + if !ok { + continue // not a function + } + + if fn.Body == nil { + continue // external (no-Go) function + } + + // Analyze the function body + ast.Walk(w, fn.Body) + } + + return failures +} + +// Name returns the rule name. 
+func (*ForbiddenCallInWgGoRule) Name() string { + return "forbidden-call-in-wg-go" +} + +type lintForbiddenCallInWgGo struct { + onFailure func(lint.Failure) +} + +func (w *lintForbiddenCallInWgGo) Visit(node ast.Node) ast.Visitor { + call, ok := node.(*ast.CallExpr) + if !ok { + return w // not a call of statements + } + + if !astutils.IsPkgDotName(call.Fun, "wg", "Go") { + return w // not a call to wg.Go + } + + if len(call.Args) != 1 { + return nil // no argument (impossible) + } + + funcLit, ok := call.Args[0].(*ast.FuncLit) + if !ok { + return nil // the argument is not a function literal + } + + var callee string + + forbiddenCallPicker := func(n ast.Node) bool { + call, ok := n.(*ast.CallExpr) + if !ok { + return false + } + + if astutils.IsPkgDotName(call.Fun, "wg", "Done") || + astutils.IsIdent(call.Fun, "panic") || + astutils.IsPkgDotName(call.Fun, "log", "Panic") || + astutils.IsPkgDotName(call.Fun, "log", "Panicf") || + astutils.IsPkgDotName(call.Fun, "log", "Panicln") { + callee = astutils.GoFmt(n) + callee, _, _ = strings.Cut(callee, "(") + return true + } + + return false + } + + // search a forbidden call in the body of the function literal + forbiddenCall := astutils.SeekNode[*ast.CallExpr](funcLit.Body, forbiddenCallPicker) + if forbiddenCall == nil { + return nil // there is no forbidden call in the call to wg.Go + } + + msg := fmt.Sprintf("do not call %s inside wg.Go", callee) + w.onFailure(lint.Failure{ + Confidence: 1, + Node: forbiddenCall, + Category: lint.FailureCategoryErrors, + Failure: msg, + }) + + return nil +} diff --git a/vendor/github.com/mgechev/revive/rule/import_shadowing.go b/vendor/github.com/mgechev/revive/rule/import_shadowing.go index 09520785c0..0028d96737 100644 --- a/vendor/github.com/mgechev/revive/rule/import_shadowing.go +++ b/vendor/github.com/mgechev/revive/rule/import_shadowing.go @@ -13,12 +13,12 @@ import ( type ImportShadowingRule struct{} // Apply applies the rule to given file. -func (*ImportShadowingRule) Apply(file *lint.File, _ lint.Arguments) []lint.Failure { +func (r *ImportShadowingRule) Apply(file *lint.File, _ lint.Arguments) []lint.Failure { var failures []lint.Failure importNames := map[string]struct{}{} for _, imp := range file.AST.Imports { - importNames[getName(imp)] = struct{}{} + importNames[r.getName(imp)] = struct{}{} } fileAst := file.AST @@ -42,20 +42,23 @@ func (*ImportShadowingRule) Name() string { return "import-shadowing" } -func getName(imp *ast.ImportSpec) string { +func (r *ImportShadowingRule) getName(imp *ast.ImportSpec) string { const pathSep = "/" const strDelim = `"` if imp.Name != nil { return imp.Name.Name } - path := imp.Path.Value - i := strings.LastIndex(path, pathSep) - if i == -1 { - return strings.Trim(path, strDelim) + path := strings.Trim(imp.Path.Value, strDelim) + parts := strings.Split(path, pathSep) + + lastSegment := parts[len(parts)-1] + if r.isVersion(lastSegment) && len(parts) >= 2 { + // Use the previous segment when current is a version (v1, v2, etc.). 
+ return parts[len(parts)-2] } - return strings.Trim(path[i+1:], strDelim) + return lastSegment } type importShadowing struct { @@ -113,3 +116,17 @@ func (w importShadowing) Visit(n ast.Node) ast.Visitor { return w } + +func (*ImportShadowingRule) isVersion(name string) bool { + if len(name) < 2 || (name[0] != 'v' && name[0] != 'V') { + return false + } + + for i := 1; i < len(name); i++ { + if name[i] < '0' || name[i] > '9' { + return false + } + } + + return true +} diff --git a/vendor/github.com/mgechev/revive/rule/inefficient_map_lookup.go b/vendor/github.com/mgechev/revive/rule/inefficient_map_lookup.go new file mode 100644 index 0000000000..b6e4bf9216 --- /dev/null +++ b/vendor/github.com/mgechev/revive/rule/inefficient_map_lookup.go @@ -0,0 +1,169 @@ +package rule + +import ( + "fmt" + "go/ast" + "go/token" + "strings" + + "github.com/mgechev/revive/internal/astutils" + "github.com/mgechev/revive/lint" +) + +// InefficientMapLookupRule spots potential inefficient map lookups. +type InefficientMapLookupRule struct{} + +// Apply applies the rule to given file. +func (*InefficientMapLookupRule) Apply(file *lint.File, _ lint.Arguments) []lint.Failure { + var failures []lint.Failure + + onFailure := func(failure lint.Failure) { + failures = append(failures, failure) + } + + w := &lintInefficientMapLookup{ + file: file, + onFailure: onFailure, + } + + if err := file.Pkg.TypeCheck(); err != nil { + return []lint.Failure{ + lint.NewInternalFailure(fmt.Sprintf("Unable to type check file %q: %v", file.Name, err)), + } + } + + // Iterate over declarations looking for function declarations + for _, decl := range file.AST.Decls { + fn, ok := decl.(*ast.FuncDecl) + if !ok { + continue // not a function + } + + if fn.Body == nil { + continue // external (no-Go) function + } + + // Analyze the function body + ast.Walk(w, fn.Body) + } + + return failures +} + +// Name returns the rule name. +func (*InefficientMapLookupRule) Name() string { + return "inefficient-map-lookup" +} + +type lintInefficientMapLookup struct { + file *lint.File + onFailure func(lint.Failure) +} + +func (w *lintInefficientMapLookup) Visit(node ast.Node) ast.Visitor { + // Only interested in blocks of statements + block, ok := node.(*ast.BlockStmt) + if !ok { + return w // not a block of statements + } + + w.analyzeBlock(block) + + return w +} + +// analyzeBlock searches AST subtrees with the following form +// +// for <key> := range <map> { +// if <key> == <expr> { +// ... +// } +func (w *lintInefficientMapLookup) analyzeBlock(b *ast.BlockStmt) { + for _, stmt := range b.List { + if !w.isRangeOverMapKey(stmt) { + continue + } + + rangeOverMap := stmt.(*ast.RangeStmt) + key := rangeOverMap.Key.(*ast.Ident) + + // Here we have identified a range over the keys of a map + // Let's check if the range body is + // { if <key> == <expr> { ... } } + // or + // { if <key> != <expr> { continue } ... }
+ if !isKeyLookup(key.Name, rangeOverMap.Body) { + continue + } + + w.onFailure(lint.Failure{ + Confidence: 1, + Node: rangeOverMap, + Category: lint.FailureCategoryStyle, + Failure: "inefficient lookup of map key", + }) + } +} + +func isKeyLookup(keyName string, blockStmt *ast.BlockStmt) bool { + blockLen := len(blockStmt.List) + if blockLen == 0 { + return false // empty + } + + firstStmt := blockStmt.List[0] + ifStmt, ok := firstStmt.(*ast.IfStmt) + if !ok { + return false // the first statement of the body is not an if + } + + binExp, ok := ifStmt.Cond.(*ast.BinaryExpr) + if !ok { + return false // the if condition is not a binary expression + } + + if !astutils.IsIdent(binExp.X, keyName) { + return false // the if condition is not of the form <key> <op> <expr> + } + + switch binExp.Op { + case token.EQL: + // if key == ... should be the single statement in the block + return blockLen == 1 + + case token.NEQ: + // if key != ... + ifBodyStmts := ifStmt.Body.List + if len(ifBodyStmts) < 1 { + return false // if key != ... { /* empty */ } + } + + branchStmt, ok := ifBodyStmts[0].(*ast.BranchStmt) + if !ok || branchStmt.Tok != token.CONTINUE { + return false // if key != ... { <not a continue> } + } + + return true + } + + return false +} + +func (w *lintInefficientMapLookup) isRangeOverMapKey(stmt ast.Stmt) bool { + rangeStmt, ok := stmt.(*ast.RangeStmt) + if !ok { + return false // not a range + } + + // Check if we range only on key + // for key := range ... + // for key, _ := range ... + hasValueVariable := rangeStmt.Value != nil && !astutils.IsIdent(rangeStmt.Value, "_") + if hasValueVariable { + return false // range over both key and value + } + + // Check if we range over a map + t := w.file.Pkg.TypeOf(rangeStmt.X) + return t != nil && strings.HasPrefix(t.String(), "map[") +} diff --git a/vendor/github.com/mgechev/revive/rule/line_length_limit.go b/vendor/github.com/mgechev/revive/rule/line_length_limit.go index 0c4c57691b..5d0653975b 100644 --- a/vendor/github.com/mgechev/revive/rule/line_length_limit.go +++ b/vendor/github.com/mgechev/revive/rule/line_length_limit.go @@ -76,7 +76,7 @@ func (r lintLineLengthNum) check() { c := utf8.RuneCountInString(t) if c > r.max { r.onFailure(lint.Failure{ - Category: lint.FailureCategoryCodeStyle, + Category: lint.FailureCategoryStyle, Position: lint.FailurePosition{ // Offset not set; it is non-trivial, and doesn't appear to be needed.
Start: token.Position{ diff --git a/vendor/github.com/mgechev/revive/rule/package_directory_mismatch.go b/vendor/github.com/mgechev/revive/rule/package_directory_mismatch.go index b150ae6070..7178054735 100644 --- a/vendor/github.com/mgechev/revive/rule/package_directory_mismatch.go +++ b/vendor/github.com/mgechev/revive/rule/package_directory_mismatch.go @@ -2,6 +2,7 @@ package rule import ( "fmt" + "os" "path/filepath" "regexp" "strings" @@ -96,7 +97,12 @@ func (r *PackageDirectoryMismatchRule) Apply(file *lint.File, _ lint.Arguments) return nil } - dirPath := filepath.Dir(file.Name) + absPath, err := filepath.Abs(file.Name) + if err != nil { + return nil + } + + dirPath := filepath.Dir(absPath) dirName := filepath.Base(dirPath) if r.ignoredDirs != nil && r.ignoredDirs.MatchString(dirPath) { @@ -120,7 +126,15 @@ func (r *PackageDirectoryMismatchRule) Apply(file *lint.File, _ lint.Arguments) return nil } + if isRootDir(dirPath) { + return nil + } + if file.IsTest() { + // treat main_test differently because it's a common package name for tests + if packageName == "main_test" { + return nil + } // External test package (directory + '_test' suffix) if r.semanticallyEqual(packageName, dirName+"_test") { return nil @@ -137,6 +151,13 @@ func (r *PackageDirectoryMismatchRule) Apply(file *lint.File, _ lint.Arguments) return nil } + if file.IsTest() { + // External test package (directory + '_test' suffix) + if r.semanticallyEqual(packageName, parentDirName+"_test") { + return nil + } + } + failure = fmt.Sprintf("package name %q does not match directory name %q or parent directory name %q", packageName, dirName, parentDirName) } @@ -150,6 +171,23 @@ func (r *PackageDirectoryMismatchRule) Apply(file *lint.File, _ lint.Arguments) } } +// isRootDir checks if the given directory contains go.mod or .git, indicating it's a root directory. +func isRootDir(dirPath string) bool { + entries, err := os.ReadDir(dirPath) + if err != nil { + return false + } + + for _, e := range entries { + switch e.Name() { + case "go.mod", ".git": + return true + } + } + + return false +} + // Name returns the rule name. func (*PackageDirectoryMismatchRule) Name() string { return "package-directory-mismatch" diff --git a/vendor/github.com/mgechev/revive/rule/redundant_test_main_exit.go b/vendor/github.com/mgechev/revive/rule/redundant_test_main_exit.go index 01a90a47c6..717969ef6b 100644 --- a/vendor/github.com/mgechev/revive/rule/redundant_test_main_exit.go +++ b/vendor/github.com/mgechev/revive/rule/redundant_test_main_exit.go @@ -66,7 +66,7 @@ func (w *lintRedundantTestMainExit) Visit(node ast.Node) ast.Visitor { pkg := id.Name fn := fc.Sel.Name - if isCallToExitFunction(pkg, fn) { + if isCallToExitFunction(pkg, fn, ce.Args) { w.onFailure(lint.Failure{ Confidence: 1, Node: ce, diff --git a/vendor/github.com/mgechev/revive/rule/struct_tag.go b/vendor/github.com/mgechev/revive/rule/struct_tag.go index edb6f9581b..0830a257de 100644 --- a/vendor/github.com/mgechev/revive/rule/struct_tag.go +++ b/vendor/github.com/mgechev/revive/rule/struct_tag.go @@ -16,6 +16,7 @@ import ( // StructTagRule lints struct tags. 
type StructTagRule struct { userDefined map[tagKey][]string // map: key -> []option + omittedTags map[tagKey]struct{} // set of tags that must not be analyzed } type tagKey string @@ -23,6 +24,8 @@ type tagKey string const ( keyASN1 tagKey = "asn1" keyBSON tagKey = "bson" + keyCbor tagKey = "cbor" + keyCodec tagKey = "codec" keyDatastore tagKey = "datastore" keyDefault tagKey = "default" keyJSON tagKey = "json" @@ -38,11 +41,13 @@ const ( keyYAML tagKey = "yaml" ) -type tagChecker func(checkCtx *checkContext, tag *structtag.Tag, fieldType ast.Expr) (message string, succeeded bool) +type tagChecker func(checkCtx *checkContext, tag *structtag.Tag, field *ast.Field) (message string, succeeded bool) var tagCheckers = map[tagKey]tagChecker{ keyASN1: checkASN1Tag, keyBSON: checkBSONTag, + keyCbor: checkCborTag, + keyCodec: checkCodecTag, keyDatastore: checkDatastoreTag, keyDefault: checkDefaultTag, keyJSON: checkJSONTag, @@ -62,6 +67,7 @@ type checkContext struct { userDefined map[tagKey][]string // map: key -> []option usedTagNbr map[int]bool // list of used tag numbers usedTagName map[string]bool // list of used tag keys + commonOptions map[string]bool // list of options defined for all fields isAtLeastGo124 bool } @@ -74,6 +80,23 @@ func (checkCtx checkContext) isUserDefined(key tagKey, opt string) bool { return slices.Contains(options, opt) } +func (checkCtx *checkContext) isCommonOption(opt string) bool { + if checkCtx.commonOptions == nil { + return false + } + + _, ok := checkCtx.commonOptions[opt] + return ok +} + +func (checkCtx *checkContext) addCommonOption(opt string) { + if checkCtx.commonOptions == nil { + checkCtx.commonOptions = map[string]bool{} + } + + checkCtx.commonOptions[opt] = true +} + // Configure validates the rule configuration, and configures the rule accordingly. // // Configuration implements the [lint.ConfigurableRule] interface. @@ -87,17 +110,23 @@ func (r *StructTagRule) Configure(arguments lint.Arguments) error { return err } - r.userDefined = make(map[tagKey][]string, len(arguments)) + r.userDefined = map[tagKey][]string{} + r.omittedTags = map[tagKey]struct{}{} for _, arg := range arguments { item, ok := arg.(string) if !ok { return fmt.Errorf("invalid argument to the %s rule. Expecting a string, got %v (of type %T)", r.Name(), arg, arg) } + parts := strings.Split(item, ",") - if len(parts) < 2 { - return fmt.Errorf("invalid argument to the %s rule. Expecting a string of the form key[,option]+, got %s", r.Name(), item) + keyStr := strings.TrimSpace(parts[0]) + keyStr, isOmitted := strings.CutPrefix(keyStr, "!") + key := tagKey(keyStr) + if isOmitted { + r.omittedTags[key] = struct{}{} + continue } - key := tagKey(strings.TrimSpace(parts[0])) + for i := 1; i < len(parts); i++ { option := strings.TrimSpace(parts[i]) r.userDefined[key] = append(r.userDefined[key], option) @@ -117,6 +146,7 @@ func (r *StructTagRule) Apply(file *lint.File, _ lint.Arguments) []lint.Failure w := lintStructTagRule{ onFailure: onFailure, userDefined: r.userDefined, + omittedTags: r.omittedTags, isAtLeastGo124: file.Pkg.IsAtLeastGoVersion(lint.Go124), tagCheckers: tagCheckers, } @@ -134,6 +164,7 @@ func (*StructTagRule) Name() string { type lintStructTagRule struct { onFailure func(lint.Failure) userDefined map[tagKey][]string // map: key -> []option + omittedTags map[tagKey]struct{} isAtLeastGo124 bool tagCheckers map[tagKey]tagChecker } @@ -164,36 +195,71 @@ func (w lintStructTagRule) Visit(node ast.Node) ast.Visitor { // checkTaggedField checks the tag of the given field. 
// precondition: the field has a tag -func (w lintStructTagRule) checkTaggedField(checkCtx *checkContext, f *ast.Field) { - if len(f.Names) > 0 && !f.Names[0].IsExported() { - w.addFailuref(f, "tag on not-exported field %s", f.Names[0].Name) - } - - tags, err := structtag.Parse(strings.Trim(f.Tag.Value, "`")) +func (w lintStructTagRule) checkTaggedField(checkCtx *checkContext, field *ast.Field) { + tags, err := structtag.Parse(strings.Trim(field.Tag.Value, "`")) if err != nil || tags == nil { - w.addFailuref(f.Tag, "malformed tag") + w.addFailuref(field.Tag, "malformed tag") return } + analyzedTags := map[tagKey]struct{}{} for _, tag := range tags.Tags() { + _, mustOmit := w.omittedTags[tagKey(tag.Key)] + if mustOmit { + continue + } + if msg, ok := w.checkTagNameIfNeed(checkCtx, tag); !ok { - w.addFailureWithTagKey(f.Tag, msg, tag.Key) + w.addFailureWithTagKey(field.Tag, msg, tag.Key) } if msg, ok := checkOptionsOnIgnoredField(tag); !ok { - w.addFailureWithTagKey(f.Tag, msg, tag.Key) + w.addFailureWithTagKey(field.Tag, msg, tag.Key) } - checker, ok := w.tagCheckers[tagKey(tag.Key)] + key := tagKey(tag.Key) + checker, ok := w.tagCheckers[key] if !ok { continue // we don't have a checker for the tag } - msg, ok := checker(checkCtx, tag, f.Type) + msg, ok := checker(checkCtx, tag, field) if !ok { - w.addFailureWithTagKey(f.Tag, msg, tag.Key) + w.addFailureWithTagKey(field.Tag, msg, tag.Key) + } + + analyzedTags[key] = struct{}{} + } + + if w.shallWarnOnUnexportedField(field.Names, analyzedTags) { + w.addFailuref(field, "tag on not-exported field %s", field.Names[0].Name) + } +} + +// tagKeyToSpecialField maps tag keys to their "special" meaning struct fields. +var tagKeyToSpecialField = map[tagKey]string{ + "codec": structTagCodecSpecialField, +} + +func (lintStructTagRule) shallWarnOnUnexportedField(fieldNames []*ast.Ident, tags map[tagKey]struct{}) bool { + if len(fieldNames) != 1 { // only handle the case of single field name (99.999% of cases) + return false + } + + if fieldNames[0].IsExported() { + return false + } + + fieldNameStr := fieldNames[0].Name + + for key := range tags { + specialField, ok := tagKeyToSpecialField[key] + if ok && specialField == fieldNameStr { + return false } } + + return true } func (w lintStructTagRule) checkTagNameIfNeed(checkCtx *checkContext, tag *structtag.Tag) (message string, succeeded bool) { @@ -204,7 +270,7 @@ func (w lintStructTagRule) checkTagNameIfNeed(checkCtx *checkContext, tag *struc key := tagKey(tag.Key) switch key { - case keyBSON, keyJSON, keyXML, keyYAML, keyProtobuf, keySpanner: + case keyBSON, keyCodec, keyJSON, keyProtobuf, keySpanner, keyXML, keyYAML: // keys that need to check for duplicated tags default: return "", true } @@ -241,7 +307,8 @@ func (lintStructTagRule) getTagName(tag *structtag.Tag) string { } } -func checkASN1Tag(checkCtx *checkContext, tag *structtag.Tag, fieldType ast.Expr) (message string, succeeded bool) { +func checkASN1Tag(checkCtx *checkContext, tag *structtag.Tag, field *ast.Field) (message string, succeeded bool) { + fieldType := field.Type checkList := slices.Concat(tag.Options, []string{tag.Name}) for _, opt := range checkList { switch opt { @@ -282,7 +349,7 @@ func checkCompoundANS1Option(checkCtx *checkContext, opt string, fieldType ast.E return "", true } -func checkDatastoreTag(checkCtx *checkContext, tag *structtag.Tag, _ ast.Expr) (message string, succeeded bool) { +func checkDatastoreTag(checkCtx *checkContext, tag *structtag.Tag, _ *ast.Field) (message string, succeeded bool) { for _, opt := range 
tag.Options { switch opt { case "flatten", "noindex", "omitempty": @@ -297,15 +364,15 @@ func checkDatastoreTag(checkCtx *checkContext, tag *structtag.Tag, _ ast.Expr) ( return "", true } -func checkDefaultTag(_ *checkContext, tag *structtag.Tag, fieldType ast.Expr) (message string, succeeded bool) { - if !typeValueMatch(fieldType, tag.Name) { +func checkDefaultTag(_ *checkContext, tag *structtag.Tag, field *ast.Field) (message string, succeeded bool) { + if !typeValueMatch(field.Type, tag.Name) { return msgTypeMismatch, false } return "", true } -func checkBSONTag(checkCtx *checkContext, tag *structtag.Tag, _ ast.Expr) (message string, succeeded bool) { +func checkBSONTag(checkCtx *checkContext, tag *structtag.Tag, _ *ast.Field) (message string, succeeded bool) { for _, opt := range tag.Options { switch opt { case "inline", "minsize", "omitempty": @@ -320,7 +387,92 @@ func checkBSONTag(checkCtx *checkContext, tag *structtag.Tag, _ ast.Expr) (messa return "", true } -func checkJSONTag(checkCtx *checkContext, tag *structtag.Tag, _ ast.Expr) (message string, succeeded bool) { +func checkCborTag(checkCtx *checkContext, tag *structtag.Tag, _ *ast.Field) (message string, succeeded bool) { + hasToArray := false + hasOmitEmptyOrZero := false + hasKeyAsInt := false + + for _, opt := range tag.Options { + switch opt { + case "omitempty", "omitzero": + hasOmitEmptyOrZero = true + case "toarray": + if tag.Name != "" { + return `tag name for option "toarray" should be empty`, false + } + hasToArray = true + case "keyasint": + intKey, err := strconv.Atoi(tag.Name) + if err != nil { + return `tag name for option "keyasint" should be an integer`, false + } + + _, ok := checkCtx.usedTagNbr[intKey] + if ok { + return fmt.Sprintf("duplicated integer key %d", intKey), false + } + + checkCtx.usedTagNbr[intKey] = true + hasKeyAsInt = true + continue + + default: + if !checkCtx.isUserDefined(keyCbor, opt) { + return fmt.Sprintf(msgUnknownOption, opt), false + } + } + } + + // Check for duplicated tag names + if tag.Name != "" { + _, ok := checkCtx.usedTagName[tag.Name] + if ok { + return fmt.Sprintf("duplicated tag name %s", tag.Name), false + } + checkCtx.usedTagName[tag.Name] = true + } + + // Check for integer tag names without keyasint option + if !hasKeyAsInt { + _, err := strconv.Atoi(tag.Name) + if err == nil { + return `integer tag names are only allowed in presence of "keyasint" option`, false + } + } + + if hasToArray && hasOmitEmptyOrZero { + return `options "omitempty" and "omitzero" are ignored in presence of "toarray" option`, false + } + + return "", true +} + +const structTagCodecSpecialField = "_struct" + +func checkCodecTag(checkCtx *checkContext, tag *structtag.Tag, field *ast.Field) (message string, succeeded bool) { + fieldNames := field.Names + mustAddToCommonOptions := len(fieldNames) == 1 && fieldNames[0].Name == structTagCodecSpecialField // see https://github.com/mgechev/revive/issues/1477#issuecomment-3191493076 + for _, opt := range tag.Options { + if mustAddToCommonOptions { + checkCtx.addCommonOption(opt) + } else if checkCtx.isCommonOption(opt) { + return fmt.Sprintf("redundant option %q, already set for all fields", opt), false + } + + switch opt { + case "omitempty", "toarray", "int", "uint", "float", "-", "omitemptyarray": + default: + if checkCtx.isUserDefined(keyCodec, opt) { + continue + } + return fmt.Sprintf(msgUnknownOption, opt), false + } + } + + return "", true +} + +func checkJSONTag(checkCtx *checkContext, tag *structtag.Tag, _ *ast.Field) (message string, succeeded 
bool) { for _, opt := range tag.Options { switch opt { case "omitempty", "string": @@ -345,7 +497,7 @@ func checkJSONTag(checkCtx *checkContext, tag *structtag.Tag, _ ast.Expr) (messa return "", true } -func checkMapstructureTag(checkCtx *checkContext, tag *structtag.Tag, _ ast.Expr) (message string, succeeded bool) { +func checkMapstructureTag(checkCtx *checkContext, tag *structtag.Tag, _ *ast.Field) (message string, succeeded bool) { for _, opt := range tag.Options { switch opt { case "omitempty", "reminder", "squash": @@ -360,13 +512,14 @@ func checkMapstructureTag(checkCtx *checkContext, tag *structtag.Tag, _ ast.Expr return "", true } -func checkPropertiesTag(_ *checkContext, tag *structtag.Tag, fieldType ast.Expr) (message string, succeeded bool) { +func checkPropertiesTag(_ *checkContext, tag *structtag.Tag, field *ast.Field) (message string, succeeded bool) { options := tag.Options if len(options) == 0 { return "", true } seenOptions := map[string]bool{} + fieldType := field.Type for _, opt := range options { msg, ok := fmt.Sprintf("unknown or malformed option %q", opt), false if key, value, found := strings.Cut(opt, "="); found { @@ -405,7 +558,7 @@ func checkCompoundPropertiesOption(key, value string, fieldType ast.Expr, seenOp return "", true } -func checkProtobufTag(checkCtx *checkContext, tag *structtag.Tag, _ ast.Expr) (message string, succeeded bool) { +func checkProtobufTag(checkCtx *checkContext, tag *structtag.Tag, _ *ast.Field) (message string, succeeded bool) { // check name switch tag.Name { case "bytes", "fixed32", "fixed64", "group", "varint", "zigzag32", "zigzag64": @@ -458,7 +611,7 @@ func checkProtobufOptions(checkCtx *checkContext, options []string) (message str return "", true } -func checkRequiredTag(_ *checkContext, tag *structtag.Tag, _ ast.Expr) (message string, succeeded bool) { +func checkRequiredTag(_ *checkContext, tag *structtag.Tag, _ *ast.Field) (message string, succeeded bool) { switch tag.Name { case "true", "false": return "", true @@ -467,7 +620,7 @@ func checkRequiredTag(_ *checkContext, tag *structtag.Tag, _ ast.Expr) (message } } -func checkTOMLTag(checkCtx *checkContext, tag *structtag.Tag, _ ast.Expr) (message string, succeeded bool) { +func checkTOMLTag(checkCtx *checkContext, tag *structtag.Tag, _ *ast.Field) (message string, succeeded bool) { for _, opt := range tag.Options { switch opt { case "omitempty": @@ -482,7 +635,7 @@ func checkTOMLTag(checkCtx *checkContext, tag *structtag.Tag, _ ast.Expr) (messa return "", true } -func checkURLTag(checkCtx *checkContext, tag *structtag.Tag, _ ast.Expr) (message string, succeeded bool) { +func checkURLTag(checkCtx *checkContext, tag *structtag.Tag, _ *ast.Field) (message string, succeeded bool) { var delimiter = "" for _, opt := range tag.Options { switch opt { @@ -505,7 +658,7 @@ func checkURLTag(checkCtx *checkContext, tag *structtag.Tag, _ ast.Expr) (messag return "", true } -func checkValidateTag(checkCtx *checkContext, tag *structtag.Tag, _ ast.Expr) (message string, succeeded bool) { +func checkValidateTag(checkCtx *checkContext, tag *structtag.Tag, _ *ast.Field) (message string, succeeded bool) { previousOption := "" seenKeysOption := false options := append([]string{tag.Name}, tag.Options...) 
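Stepping back from the signature churn above: tag checkers now receive the whole *ast.Field rather than just its type, which is what lets checkCodecTag treat go-codec's special "_struct" field as carrying options common to every field. A hedged sketch of the convention being checked (the struct and tag values are illustrative, not from the diff):

```go
package example

// Options on the special "_struct" field are recorded as common to all
// fields, so repeating one of them on a regular field is reported as
// "redundant option ..., already set for all fields".
type record struct {
	_struct bool   `codec:",omitempty"`    // sets "omitempty" for every field
	Name    string `codec:"name"`          // ok
	Age     int    `codec:"age,omitempty"` // would be flagged as redundant
}
```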
@@ -534,7 +687,7 @@ func checkValidateTag(checkCtx *checkContext, tag *structtag.Tag, _ ast.Expr) (m return "", true } -func checkXMLTag(checkCtx *checkContext, tag *structtag.Tag, _ ast.Expr) (message string, succeeded bool) { +func checkXMLTag(checkCtx *checkContext, tag *structtag.Tag, _ *ast.Field) (message string, succeeded bool) { for _, opt := range tag.Options { switch opt { case "any", "attr", "cdata", "chardata", "comment", "innerxml", "omitempty", "typeattr": @@ -549,7 +702,7 @@ func checkXMLTag(checkCtx *checkContext, tag *structtag.Tag, _ ast.Expr) (messag return "", true } -func checkYAMLTag(checkCtx *checkContext, tag *structtag.Tag, _ ast.Expr) (message string, succeeded bool) { +func checkYAMLTag(checkCtx *checkContext, tag *structtag.Tag, _ *ast.Field) (message string, succeeded bool) { for _, opt := range tag.Options { switch opt { case "flow", "inline", "omitempty": @@ -564,7 +717,7 @@ func checkYAMLTag(checkCtx *checkContext, tag *structtag.Tag, _ ast.Expr) (messa return "", true } -func checkSpannerTag(checkCtx *checkContext, tag *structtag.Tag, _ ast.Expr) (message string, succeeded bool) { +func checkSpannerTag(checkCtx *checkContext, tag *structtag.Tag, _ *ast.Field) (message string, succeeded bool) { for _, opt := range tag.Options { if !checkCtx.isUserDefined(keySpanner, opt) { return fmt.Sprintf(msgUnknownOption, opt), false @@ -691,10 +844,14 @@ var validateSingleOptions = map[string]struct{}{ "cidr": {}, "cidrv4": {}, "cidrv6": {}, + "contains": {}, + "containsany": {}, + "containsrune": {}, "credit_card": {}, "cron": {}, "cve": {}, "datauri": {}, + "datetime": {}, "dir": {}, "dirpath": {}, "dive": {}, @@ -702,11 +859,34 @@ var validateSingleOptions = map[string]struct{}{ "e164": {}, "ein": {}, "email": {}, + "endsnotwith": {}, + "endswith": {}, + "eq": {}, + "eq_ignore_case": {}, + "eqcsfield": {}, + "eqfield": {}, "eth_addr": {}, "eth_addr_checksum": {}, + "excluded_if": {}, + "excluded_unless": {}, + "excluded_with": {}, + "excluded_with_all": {}, + "excluded_without": {}, + "excluded_without_all": {}, + "excludes": {}, + "excludesall": {}, + "excludesrune": {}, + "fieldcontains": {}, + "fieldexcludes": {}, "file": {}, "filepath": {}, "fqdn": {}, + "gt": {}, + "gtcsfield": {}, + "gte": {}, + "gtecsfield": {}, + "gtefield": {}, + "gtfield": {}, "hexadecimal": {}, "hexcolor": {}, "hostname": {}, @@ -719,21 +899,21 @@ var validateSingleOptions = map[string]struct{}{ "http_url": {}, "image": {}, "ip": {}, - "ip_addr": {}, "ip4_addr": {}, "ip6_addr": {}, + "ip_addr": {}, "ipv4": {}, "ipv6": {}, "isbn": {}, "isbn10": {}, "isbn13": {}, "isdefault": {}, - "iso3166_1_alpha_numeric": {}, - "iso3166_1_alpha_numeric_eu": {}, "iso3166_1_alpha2": {}, "iso3166_1_alpha2_eu": {}, "iso3166_1_alpha3": {}, "iso3166_1_alpha3_eu": {}, + "iso3166_1_alpha_numeric": {}, + "iso3166_1_alpha_numeric_eu": {}, "iso3166_2": {}, "iso4217": {}, "iso4217_numeric": {}, @@ -741,22 +921,46 @@ var validateSingleOptions = map[string]struct{}{ "json": {}, "jwt": {}, "latitude": {}, + "len": {}, "longitude": {}, "lowercase": {}, + "lt": {}, + "ltcsfield": {}, + "lte": {}, + "ltecsfield": {}, + "ltefield": {}, + "ltfield": {}, "luhn_checksum": {}, "mac": {}, + "max": {}, "md4": {}, "md5": {}, + "min": {}, "mongodb": {}, "mongodb_connection_string": {}, "multibyte": {}, + "ne": {}, + "ne_ignore_case": {}, + "necsfield": {}, + "nefield": {}, "number": {}, "numeric": {}, + "omitempty": {}, + "omitnil": {}, + "omitzero": {}, + "oneof": {}, + "oneofci": {}, "port": {}, "postcode_iso3166_alpha2": {}, 
"postcode_iso3166_alpha2_field": {}, "printascii": {}, "required": {}, + "required_if": {}, + "required_unless": {}, + "required_with": {}, + "required_with_all": {}, + "required_without": {}, + "required_without_all": {}, "rgb": {}, "rgba": {}, "ripemd128": {}, @@ -765,18 +969,23 @@ var validateSingleOptions = map[string]struct{}{ "sha256": {}, "sha384": {}, "sha512": {}, + "skip_unless": {}, + "spicedb": {}, "ssn": {}, - "tcp_addr": {}, + "startsnotwith": {}, + "startswith": {}, "tcp4_addr": {}, "tcp6_addr": {}, + "tcp_addr": {}, "tiger128": {}, "tiger160": {}, "tiger192": {}, "timezone": {}, - "udp_addr": {}, "udp4_addr": {}, "udp6_addr": {}, + "udp_addr": {}, "ulid": {}, + "unique": {}, "unix_addr": {}, "uppercase": {}, "uri": {}, @@ -784,13 +993,14 @@ var validateSingleOptions = map[string]struct{}{ "url_encoded": {}, "urn_rfc2141": {}, "uuid": {}, - "uuid_rfc4122": {}, "uuid3": {}, "uuid3_rfc4122": {}, "uuid4": {}, "uuid4_rfc4122": {}, "uuid5": {}, "uuid5_rfc4122": {}, + "uuid_rfc4122": {}, + "validateFn": {}, } // These are options that are used in expressions of the form: diff --git a/vendor/github.com/mgechev/revive/rule/unconditional_recursion.go b/vendor/github.com/mgechev/revive/rule/unconditional_recursion.go index 21fe09fa85..772896c1f5 100644 --- a/vendor/github.com/mgechev/revive/rule/unconditional_recursion.go +++ b/vendor/github.com/mgechev/revive/rule/unconditional_recursion.go @@ -190,9 +190,7 @@ func (*lintUnconditionalRecursionRule) hasControlExit(node ast.Node) bool { functionName := se.Sel.Name pkgName := id.Name - if isCallToExitFunction(pkgName, functionName) { - return true - } + return isCallToExitFunction(pkgName, functionName, n.Args) } return false diff --git a/vendor/github.com/mgechev/revive/rule/unnecessary_if.go b/vendor/github.com/mgechev/revive/rule/unnecessary_if.go new file mode 100644 index 0000000000..f2f8174bdc --- /dev/null +++ b/vendor/github.com/mgechev/revive/rule/unnecessary_if.go @@ -0,0 +1,202 @@ +package rule + +import ( + "fmt" + "go/ast" + "go/token" + + "github.com/mgechev/revive/internal/astutils" + "github.com/mgechev/revive/lint" +) + +// UnnecessaryIfRule warns on if...else statements that can be replaced by simpler expressions. +type UnnecessaryIfRule struct{} + +// Apply applies the rule to given file. +func (*UnnecessaryIfRule) Apply(file *lint.File, _ lint.Arguments) []lint.Failure { + var failures []lint.Failure + + onFailure := func(failure lint.Failure) { + failures = append(failures, failure) + } + + w := &lintUnnecessaryIf{onFailure: onFailure} + for _, decl := range file.AST.Decls { + fn, ok := decl.(*ast.FuncDecl) + if !ok || fn.Body == nil { + continue + } + + ast.Walk(w, fn.Body) + } + + return failures +} + +// Name returns the rule name. +func (*UnnecessaryIfRule) Name() string { + return "unnecessary-if" +} + +type lintUnnecessaryIf struct { + onFailure func(lint.Failure) +} + +// Visit walks the AST looking for if statements of the form: +// +// if cond { return } else { return } +// +// or +// +// if cond { = } else { = }. 
+func (w *lintUnnecessaryIf) Visit(node ast.Node) ast.Visitor { + ifStmt, ok := node.(*ast.IfStmt) + if !ok { + return w // not an if statement + } + + // if without else or if with initialization + mustSkip := ifStmt.Else == nil || ifStmt.Init != nil + if mustSkip { + return w + } + + elseBranch, ok := ifStmt.Else.(*ast.BlockStmt) + if !ok { // if-else-if construction + return w // the rule only copes with single if...else statements + } + + thenStmts := ifStmt.Body.List + elseStmts := elseBranch.List + if len(thenStmts) != 1 || len(elseStmts) != 1 { + return w // then and else branches do not have just one statement + } + + replacement := "" + thenBool := false + switch thenStmt := thenStmts[0].(type) { + case *ast.ReturnStmt: + replacement, thenBool = w.replacementForReturnStmt(thenStmt, elseStmts) + case *ast.AssignStmt: + replacement, thenBool = w.replacementForAssignmentStmt(thenStmt, elseStmts) + default: + return w // the then branch is neither a return nor an assignment + } + + if replacement == "" { + return w // no replacement found + } + + cond := w.condAsString(ifStmt.Cond, !thenBool) + msg := "replace this conditional by: " + replacement + " " + cond + + w.onFailure(lint.Failure{ + Confidence: 1.0, + Node: ifStmt, + Category: lint.FailureCategoryLogic, + Failure: msg, + }) + + return nil +} + +var relationalOppositeOf = map[token.Token]token.Token{ + token.EQL: token.NEQ, // == != + token.GEQ: token.LSS, // >= < + token.GTR: token.LEQ, // > <= + token.LEQ: token.GTR, // <= > + token.LSS: token.GEQ, // < >= + token.NEQ: token.EQL, // != == +} + +// condAsString yields the string representation of the given condition expression. +// The method will try to minimize the negations in the resulting expression. +func (*lintUnnecessaryIf) condAsString(cond ast.Expr, mustNegate bool) string { + result := astutils.GoFmt(cond) + + if mustNegate { + result = "!(" + result + ")" // naive negation + + // check if we can build a simpler expression + if binExp, ok := cond.(*ast.BinaryExpr); ok { + originalOp := binExp.Op + opposite, ok := relationalOppositeOf[originalOp] + if ok { + binExp.Op = opposite + result = astutils.GoFmt(binExp) // replace initial result by a simpler one + binExp.Op = originalOp + } + } + } + + return result +} + +// replacementForAssignmentStmt returns a replacement statement != "" +// iff both then and else statements are of the form <var> = <bool> +// If the replacement != "" then the second return value is the Boolean value +// of <bool> in the then-side assignment, false otherwise. +func (w *lintUnnecessaryIf) replacementForAssignmentStmt(thenStmt *ast.AssignStmt, elseStmts []ast.Stmt) (replacement string, thenBool bool) { + thenBoolStr, ok := w.isSingleBooleanLiteral(thenStmt.Rhs) + if !ok { + return "", false + } + + thenLHS := astutils.GoFmt(thenStmt.Lhs[0]) + + elseStmt, ok := elseStmts[0].(*ast.AssignStmt) + if !ok { + return "", false + } + + elseLHS := astutils.GoFmt(elseStmt.Lhs[0]) + if thenLHS != elseLHS { + return "", false + } + + _, ok = w.isSingleBooleanLiteral(elseStmt.Rhs) + if !ok { + return "", false + } + + return fmt.Sprintf("%s %s", thenLHS, thenStmt.Tok.String()), thenBoolStr == "true" +} + +// replacementForReturnStmt returns a replacement statement != "" +// iff both then and else statements are of the form return <bool> +// If the replacement != "" then the second return value is the Boolean value +// of <bool> in the then-side return statement, false otherwise.
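To make the rule concrete, a small sketch of the two shapes it reports, with the replacement text it suggests (identifiers are illustrative):

```go
package example

func isPositive(x int) bool {
	// Reported by unnecessary-if; suggested replacement: "return x > 0".
	if x > 0 {
		return true
	} else {
		return false
	}
}

func markEmpty(s string) bool {
	var empty bool
	// Reported by unnecessary-if; suggested replacement: empty = s == "".
	if s == "" {
		empty = true
	} else {
		empty = false
	}
	return empty
}
```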
+func (w *lintUnnecessaryIf) replacementForReturnStmt(thenStmt *ast.ReturnStmt, elseStmts []ast.Stmt) (replacement string, thenBool bool) { + thenBoolStr, ok := w.isSingleBooleanLiteral(thenStmt.Results) + if !ok { + return "", false + } + + elseStmt, ok := elseStmts[0].(*ast.ReturnStmt) + if !ok { + return "", false + } + + _, ok = w.isSingleBooleanLiteral(elseStmt.Results) + if !ok { + return "", false + } + + return "return", thenBoolStr == "true" +} + +// isSingleBooleanLiteral returns the string representation of <expr> and true +// if the given list of expressions has exactly one element and that element is a bool literal (true or false), +// otherwise it returns "" and false. +func (*lintUnnecessaryIf) isSingleBooleanLiteral(exprs []ast.Expr) (string, bool) { + if len(exprs) != 1 { + return "", false + } + + ident, ok := exprs[0].(*ast.Ident) + if !ok { + return "", false + } + + return ident.Name, (ident.Name == "true" || ident.Name == "false") +} diff --git a/vendor/github.com/mgechev/revive/rule/use_waitgroup_go.go b/vendor/github.com/mgechev/revive/rule/use_waitgroup_go.go index 9dd80d79d9..2e58eb6fbb 100644 --- a/vendor/github.com/mgechev/revive/rule/use_waitgroup_go.go +++ b/vendor/github.com/mgechev/revive/rule/use_waitgroup_go.go @@ -113,7 +113,7 @@ func (w *lintUseWaitGroupGo) analyzeBlock(b *ast.BlockStmt) { w.onFailure(lint.Failure{ Confidence: 1, Node: call, - Category: lint.FailureCategoryCodeStyle, + Category: lint.FailureCategoryStyle, Failure: "replace wg.Add()...go {...wg.Done()...} with wg.Go(...)", }) diff --git a/vendor/github.com/mgechev/revive/rule/useless_fallthrough.go b/vendor/github.com/mgechev/revive/rule/useless_fallthrough.go index 2238767220..bef77575b7 100644 --- a/vendor/github.com/mgechev/revive/rule/useless_fallthrough.go +++ b/vendor/github.com/mgechev/revive/rule/useless_fallthrough.go @@ -69,8 +69,9 @@ func (w *lintUselessFallthrough) Visit(node ast.Node) ast.Visitor { confidence := 1.0 if nextCaseClause := switchStmt.Body.List[i+1].(*ast.CaseClause); nextCaseClause.List == nil { - // the next case clause is the default clause, report with lower confidence. - confidence = 0.8 + // The next clause is 'default:', and this is a valid pattern. + // Skip reporting this fallthrough. + continue } if _, ok := w.commentsMap[branchStmt]; ok { // The fallthrough has a comment, report with lower confidence. @@ -80,7 +81,7 @@ func (w *lintUselessFallthrough) Visit(node ast.Node) ast.Visitor { w.onFailure(lint.Failure{ Confidence: confidence, Node: branchStmt, - Category: lint.FailureCategoryCodeStyle, + Category: lint.FailureCategoryStyle, Failure: `this "fallthrough" can be removed by consolidating this case clause with the next one`, }) diff --git a/vendor/github.com/mgechev/revive/rule/utils.go b/vendor/github.com/mgechev/revive/rule/utils.go index 24a7234352..0c9d7bc95c 100644 --- a/vendor/github.com/mgechev/revive/rule/utils.go +++ b/vendor/github.com/mgechev/revive/rule/utils.go @@ -2,24 +2,40 @@ package rule import ( "fmt" + "go/ast" "go/token" "regexp" "strings" + "github.com/mgechev/revive/internal/astutils" "github.com/mgechev/revive/lint" ) +// exitFuncChecker is a function type that checks whether a function call is an exit function. +type exitFuncChecker func(args []ast.Expr) bool + +var alwaysTrue exitFuncChecker = func([]ast.Expr) bool { return true } + // exitFunctions is a map of std packages and functions that are considered as exit functions.
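The utils.go rework below makes exit detection argument-aware: flag.Parse always counts as a potential exit, while flag.NewFlagSet counts only when called with flag.ExitOnError, and deep-exit then reports such calls outside main() or init(). A short sketch of code that would now be flagged (the function name is illustrative):

```go
package example

import "flag"

func setup(args []string) {
	// Reported by deep-exit: with flag.ExitOnError, a later Parse error
	// makes the flag set call os.Exit.
	fs := flag.NewFlagSet("tool", flag.ExitOnError)
	_ = fs.Parse(args)

	// Also reported: flag.Parse can exit through the default CommandLine set.
	flag.Parse()
}
```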
-var exitFunctions = map[string]map[string]bool{ - "os": {"Exit": true}, - "syscall": {"Exit": true}, +var exitFunctions = map[string]map[string]exitFuncChecker{ + "os": {"Exit": alwaysTrue}, + "syscall": {"Exit": alwaysTrue}, "log": { - "Fatal": true, - "Fatalf": true, - "Fatalln": true, - "Panic": true, - "Panicf": true, - "Panicln": true, + "Fatal": alwaysTrue, + "Fatalf": alwaysTrue, + "Fatalln": alwaysTrue, + "Panic": alwaysTrue, + "Panicf": alwaysTrue, + "Panicln": alwaysTrue, + }, + "flag": { + "Parse": func([]ast.Expr) bool { return true }, + "NewFlagSet": func(args []ast.Expr) bool { + if len(args) != 2 { + return false + } + return astutils.IsPkgDotName(args[1], "flag", "ExitOnError") + }, }, } @@ -86,8 +102,18 @@ func isDirectiveComment(line string) bool { } // isCallToExitFunction checks if the function call is a call to an exit function. -func isCallToExitFunction(pkgName, functionName string) bool { - return exitFunctions[pkgName] != nil && exitFunctions[pkgName][functionName] +func isCallToExitFunction(pkgName, functionName string, callArgs []ast.Expr) bool { + m, ok := exitFunctions[pkgName] + if !ok { + return false + } + + check, ok := m[functionName] + if !ok { + return false + } + + return check(callArgs) } // newInternalFailureError returns a slice of Failure with a single internal failure in it. diff --git a/vendor/github.com/mgechev/revive/rule/var_naming.go b/vendor/github.com/mgechev/revive/rule/var_naming.go index c8c26a44be..8b893fabf8 100644 --- a/vendor/github.com/mgechev/revive/rule/var_naming.go +++ b/vendor/github.com/mgechev/revive/rule/var_naming.go @@ -23,24 +23,50 @@ var knownNameExceptions = map[string]bool{ // The rule warns about the usage of any package name in this list if skipPackageNameChecks is false. // Values in the list should be lowercased. var defaultBadPackageNames = map[string]struct{}{ - "common": {}, - "interfaces": {}, - "misc": {}, - "types": {}, - "util": {}, - "utils": {}, + "api": {}, + "common": {}, + "interface": {}, + "interfaces": {}, + "misc": {}, + "miscellaneous": {}, + "shared": {}, + "type": {}, + "types": {}, + "util": {}, + "utilities": {}, + "utils": {}, +} + +var stdLibPackageNames = map[string]struct{}{ + "bytes": {}, + "context": {}, + "crypto": {}, + "errors": {}, + "fmt": {}, + "hash": {}, + "http": {}, + "io": {}, + "json": {}, + "math": {}, + "net": {}, + "os": {}, + "sort": {}, + "string": {}, + "time": {}, + "xml": {}, } // VarNamingRule lints the name of a variable. 
type VarNamingRule struct { - allowList []string - blockList []string - skipInitialismNameChecks bool // if true disable enforcing capitals for common initialisms - - allowUpperCaseConst bool // if true - allows to use UPPER_SOME_NAMES for constants - skipPackageNameChecks bool // check for meaningless and user-defined bad package names - extraBadPackageNames map[string]struct{} // inactive if skipPackageNameChecks is false - pkgNameAlreadyChecked syncSet // set of packages names already checked + allowList []string + blockList []string + + allowUpperCaseConst bool // if true - allows to use UPPER_SOME_NAMES for constants + skipInitialismNameChecks bool // if true - disable enforcing capitals for common initialisms + skipPackageNameChecks bool // if true - disable check for meaningless and user-defined bad package names + skipPackageNameCollisionWithGoStd bool // if true - disable checks for collisions with Go standard library package names + extraBadPackageNames map[string]struct{} // inactive if skipPackageNameChecks is false + pkgNameAlreadyChecked syncSet // set of packages names already checked } // Configure validates the rule configuration, and configures the rule accordingly. @@ -103,6 +129,9 @@ func (r *VarNamingRule) Configure(arguments lint.Arguments) error { r.extraBadPackageNames[strings.ToLower(n)] = struct{}{} } } + if isRuleOption(k, "skipPackageNameCollisionWithGoStd") { + r.skipPackageNameCollisionWithGoStd = true + } } } return nil @@ -154,6 +183,13 @@ func (r *VarNamingRule) applyPackageCheckRules(file *lint.File, onFailure func(f pkgNameNode := file.AST.Name pkgName := pkgNameNode.Name pkgNameLower := strings.ToLower(pkgName) + + // Check if top level package + if pkgNameLower == "pkg" && filepath.Base(fileDir) != pkgName { + onFailure(r.pkgNameFailure(pkgNameNode, "should not have a root level package called pkg")) + return + } + if _, ok := r.extraBadPackageNames[pkgNameLower]; ok { onFailure(r.pkgNameFailure(pkgNameNode, "avoid bad package names")) return @@ -164,6 +200,10 @@ func (r *VarNamingRule) applyPackageCheckRules(file *lint.File, onFailure func(f return } + if _, ok := stdLibPackageNames[pkgNameLower]; ok && !r.skipPackageNameCollisionWithGoStd { + onFailure(r.pkgNameFailure(pkgNameNode, "avoid package names that conflict with Go standard library package names")) + } + // Package names need slightly different handling than other names. 
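Related to the expanded package-name lists above: unless the new skipPackageNameCollisionWithGoStd option is set, var-naming now also reports package clauses that collide with standard-library names. A one-file illustration (the file content is hypothetical):

```go
// Reported by var-naming as "avoid package names that conflict with
// Go standard library package names" (skippable via the new
// skipPackageNameCollisionWithGoStd rule option).
package errors
```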
if strings.Contains(pkgName, "_") && !strings.HasSuffix(pkgName, "_test") { onFailure(r.pkgNameFailure(pkgNameNode, "don't use an underscore in package name")) diff --git a/vendor/github.com/securego/gosec/v2/action.yml b/vendor/github.com/securego/gosec/v2/action.yml index b19d17ee9c..bc5d4a8dd0 100644 --- a/vendor/github.com/securego/gosec/v2/action.yml +++ b/vendor/github.com/securego/gosec/v2/action.yml @@ -10,7 +10,7 @@ inputs: runs: using: "docker" - image: "docker://securego/gosec:2.22.9" + image: "docker://securego/gosec:2.22.10" args: - ${{ inputs.args }} diff --git a/vendor/github.com/securego/gosec/v2/analyzer.go b/vendor/github.com/securego/gosec/v2/analyzer.go index 371904b6dc..0508bcea6e 100644 --- a/vendor/github.com/securego/gosec/v2/analyzer.go +++ b/vendor/github.com/securego/gosec/v2/analyzer.go @@ -35,6 +35,8 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/buildssa" + "golang.org/x/tools/go/analysis/passes/ctrlflow" + "golang.org/x/tools/go/analysis/passes/inspect" "golang.org/x/tools/go/packages" "github.com/securego/gosec/v2/analyzers" @@ -187,7 +189,6 @@ type Analyzer struct { trackSuppressions bool concurrency int analyzerSet *analyzers.AnalyzerSet - mu sync.Mutex } // NewAnalyzer builds a new analyzer. @@ -251,12 +252,6 @@ func (gosec *Analyzer) LoadAnalyzers(analyzerDefinitions map[string]analyzers.An // Process kicks off the analysis process for a given package func (gosec *Analyzer) Process(buildTags []string, packagePaths ...string) error { - config := &packages.Config{ - Mode: LoadMode, - BuildFlags: buildTags, - Tests: gosec.tests, - } - type result struct { pkgPath string pkgs []*packages.Package @@ -273,7 +268,7 @@ func (gosec *Analyzer) Process(buildTags []string, packagePaths ...string) error for { select { case s := <-j: - pkgs, err := gosec.load(s, config) + pkgs, err := gosec.load(s, buildTags) select { case r <- result{pkgPath: s, pkgs: pkgs, err: err}: case <-quit: @@ -326,7 +321,7 @@ func (gosec *Analyzer) Process(buildTags []string, packagePaths ...string) error return nil } -func (gosec *Analyzer) load(pkgPath string, conf *packages.Config) ([]*packages.Package, error) { +func (gosec *Analyzer) load(pkgPath string, buildTags []string) ([]*packages.Package, error) { abspath, err := GetPkgAbsPath(pkgPath) if err != nil { gosec.logger.Printf("Skipping: %s. Path doesn't exist.", abspath) @@ -334,12 +329,10 @@ func (gosec *Analyzer) load(pkgPath string, conf *packages.Config) ([]*packages. } gosec.logger.Println("Import directory:", abspath) - // step 1/3 create build context. + + // step 1/2: build context requires the array of build tags. buildD := build.Default - // step 2/3: add build tags to get env dependent files into basePackage. - gosec.mu.Lock() - buildD.BuildTags = conf.BuildFlags - gosec.mu.Unlock() + buildD.BuildTags = buildTags basePackage, err := buildD.ImportDir(pkgPath, build.ImportComment) if err != nil { return []*packages.Package{}, fmt.Errorf("importing dir %q: %w", pkgPath, err) @@ -362,10 +355,12 @@ func (gosec *Analyzer) load(pkgPath string, conf *packages.Config) ([]*packages. } } - // step 3/3 remove build tags from conf to proceed build correctly. - gosec.mu.Lock() - conf.BuildFlags = nil - defer gosec.mu.Unlock() + // step 2/2: pass in cli encoded build flags to build correctly. + conf := &packages.Config{ + Mode: LoadMode, + BuildFlags: CLIBuildTags(buildTags), + Tests: gosec.tests, + } pkgs, err := packages.Load(conf, packageFiles...) 
if err != nil { return []*packages.Package{}, fmt.Errorf("loading files from package %q: %w", pkgPath, err) @@ -412,6 +407,11 @@ func (gosec *Analyzer) CheckRules(pkg *packages.Package) { // CheckAnalyzers runs analyzers on a given package. func (gosec *Analyzer) CheckAnalyzers(pkg *packages.Package) { + // significant performance improvement if no analyzers are loaded + if len(gosec.analyzerSet.Analyzers) == 0 { + return + } + ssaResult, err := gosec.buildSSA(pkg) if err != nil || ssaResult == nil { errMessage := "Error building the SSA representation of the package " + pkg.Name + ": " @@ -432,7 +432,7 @@ func (gosec *Analyzer) CheckAnalyzers(pkg *packages.Package) { buildssa.Analyzer: &analyzers.SSAAnalyzerResult{ Config: gosec.Config(), Logger: gosec.logger, - SSA: ssaResult.(*buildssa.SSA), + SSA: ssaResult, }, } @@ -493,7 +493,7 @@ func (gosec *Analyzer) generatedFiles(pkg *packages.Package) map[string]bool { } // buildSSA runs the SSA pass which builds the SSA representation of the package. It handles gracefully any panic. -func (gosec *Analyzer) buildSSA(pkg *packages.Package) (interface{}, error) { +func (gosec *Analyzer) buildSSA(pkg *packages.Package) (*buildssa.SSA, error) { defer func() { if r := recover(); r != nil { gosec.logger.Printf( @@ -502,26 +502,54 @@ func (gosec *Analyzer) buildSSA(pkg *packages.Package) (interface{}, error) { ) } }() - ssaPass := &analysis.Pass{ - Analyzer: buildssa.Analyzer, - Fset: pkg.Fset, - Files: pkg.Syntax, - OtherFiles: pkg.OtherFiles, - IgnoredFiles: pkg.IgnoredFiles, - Pkg: pkg.Types, - TypesInfo: pkg.TypesInfo, - TypesSizes: pkg.TypesSizes, - ResultOf: nil, - Report: nil, - ImportObjectFact: nil, - ExportObjectFact: nil, - ImportPackageFact: nil, - ExportPackageFact: nil, - AllObjectFacts: nil, - AllPackageFacts: nil, - } - - return ssaPass.Analyzer.Run(ssaPass) + if pkg == nil { + return nil, errors.New("nil package provided") + } + if pkg.Types == nil { + return nil, fmt.Errorf("package %s has no type information (compilation failed?)", pkg.Name) + } + if pkg.TypesInfo == nil { + return nil, fmt.Errorf("package %s has no type information", pkg.Name) + } + pass := &analysis.Pass{ + Fset: pkg.Fset, + Files: pkg.Syntax, + OtherFiles: pkg.OtherFiles, + IgnoredFiles: pkg.IgnoredFiles, + Pkg: pkg.Types, + TypesInfo: pkg.TypesInfo, + TypesSizes: pkg.TypesSizes, + ResultOf: make(map[*analysis.Analyzer]interface{}), + Report: func(d analysis.Diagnostic) {}, + ImportObjectFact: func(obj types.Object, fact analysis.Fact) bool { return false }, + ExportObjectFact: func(obj types.Object, fact analysis.Fact) {}, + } + + pass.Analyzer = inspect.Analyzer + i, err := inspect.Analyzer.Run(pass) + if err != nil { + return nil, fmt.Errorf("running inspect analysis: %w", err) + } + pass.ResultOf[inspect.Analyzer] = i + + pass.Analyzer = ctrlflow.Analyzer + cf, err := ctrlflow.Analyzer.Run(pass) + if err != nil { + return nil, fmt.Errorf("running control flow analysis: %w", err) + } + pass.ResultOf[ctrlflow.Analyzer] = cf + + pass.Analyzer = buildssa.Analyzer + result, err := buildssa.Analyzer.Run(pass) + if err != nil { + return nil, fmt.Errorf("running SSA analysis: %w", err) + } + + ssaResult, ok := result.(*buildssa.SSA) + if !ok { + return nil, fmt.Errorf("unexpected SSA analysis result type: %T", result) + } + return ssaResult, nil } // ParseErrors parses the errors from given package diff --git a/vendor/github.com/securego/gosec/v2/helpers.go b/vendor/github.com/securego/gosec/v2/helpers.go index 33e2b4a0d8..7f5724b330 100644 --- 
a/vendor/github.com/securego/gosec/v2/helpers.go +++ b/vendor/github.com/securego/gosec/v2/helpers.go @@ -553,3 +553,24 @@ func parseGoVersion(version string) (int, int, int) { return major, minor, build } + +// CLIBuildTags converts a list of Go build tags into the corresponding CLI +// build flag (-tags=form) by trimming whitespace, removing empty entries, +// and joining them into a comma-separated -tags argument for use with go build +// commands. +func CLIBuildTags(buildTags []string) []string { + var buildFlags []string + if len(buildTags) > 0 { + for _, tag := range buildTags { + // remove empty entries and surrounding whitespace + if t := strings.TrimSpace(tag); t != "" { + buildFlags = append(buildFlags, t) + } + } + if len(buildFlags) > 0 { + buildFlags = []string{"-tags=" + strings.Join(buildFlags, ",")} + } + } + + return buildFlags +} diff --git a/vendor/github.com/spf13/afero/.editorconfig b/vendor/github.com/spf13/afero/.editorconfig index 4492e9f9fe..a85749f190 100644 --- a/vendor/github.com/spf13/afero/.editorconfig +++ b/vendor/github.com/spf13/afero/.editorconfig @@ -10,3 +10,6 @@ trim_trailing_whitespace = true [*.go] indent_style = tab + +[{*.yml,*.yaml}] +indent_size = 2 diff --git a/vendor/github.com/spf13/afero/.golangci.yaml b/vendor/github.com/spf13/afero/.golangci.yaml index 806289a250..4f359b81af 100644 --- a/vendor/github.com/spf13/afero/.golangci.yaml +++ b/vendor/github.com/spf13/afero/.golangci.yaml @@ -1,18 +1,48 @@ -linters-settings: - gci: - sections: - - standard - - default - - prefix(github.com/spf13/afero) +version: "2" + +run: + timeout: 10m linters: - disable-all: true - enable: - - gci - - gofmt - - gofumpt - - staticcheck - -issues: - exclude-dirs: - - gcsfs/internal/stiface + enable: + - govet + - ineffassign + - misspell + - nolintlint + # - revive + - staticcheck + - unused + + disable: + - errcheck + # - staticcheck + + settings: + misspell: + locale: US + nolintlint: + allow-unused: false # report any unused nolint directives + require-specific: false # don't require nolint directives to be specific about which linter is being skipped + + exclusions: + paths: + - gcsfs/internal/stiface + +formatters: + enable: + - gci + - gofmt + - gofumpt + - goimports + - golines + + settings: + gci: + sections: + - standard + - default + - localmodule + + exclusions: + paths: + - gcsfs/internal/stiface diff --git a/vendor/github.com/spf13/afero/README.md b/vendor/github.com/spf13/afero/README.md index 86f1545543..ef67e9a77e 100644 --- a/vendor/github.com/spf13/afero/README.md +++ b/vendor/github.com/spf13/afero/README.md @@ -1,479 +1,474 @@ -![afero logo-sm](https://cloud.githubusercontent.com/assets/173412/11490338/d50e16dc-97a5-11e5-8b12-019a300d0fcb.png) +afero logo-sm -A FileSystem Abstraction System for Go -[![GitHub Workflow Status](https://img.shields.io/github/actions/workflow/status/spf13/afero/ci.yaml?branch=master&style=flat-square)](https://github.com/spf13/afero/actions?query=workflow%3ACI) -[![Join the chat at https://gitter.im/spf13/afero](https://badges.gitter.im/Dev%20Chat.svg)](https://gitter.im/spf13/afero?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) -[![Go Report Card](https://goreportcard.com/badge/github.com/spf13/afero?style=flat-square)](https://goreportcard.com/report/github.com/spf13/afero) -![Go Version](https://img.shields.io/badge/go%20version-%3E=1.23-61CFDD.svg?style=flat-square) -[![PkgGoDev](https://pkg.go.dev/badge/mod/github.com/spf13/afero)](https://pkg.go.dev/mod/github.com/spf13/afero) 
+[![GitHub Workflow Status](https://img.shields.io/github/actions/workflow/status/spf13/afero/ci.yaml?branch=master&style=flat-square)](https://github.com/spf13/afero/actions?query=workflow%3ACI) +[![GoDoc](https://pkg.go.dev/badge/mod/github.com/spf13/afero)](https://pkg.go.dev/mod/github.com/spf13/afero) +[![Go Report Card](https://goreportcard.com/badge/github.com/spf13/afero)](https://goreportcard.com/report/github.com/spf13/afero) +![Go Version](https://img.shields.io/badge/go%20version-%3E=1.23-61CFDD.svg?style=flat-square") -# Overview -Afero is a filesystem framework providing a simple, uniform and universal API -interacting with any filesystem, as an abstraction layer providing interfaces, -types and methods. Afero has an exceptionally clean interface and simple design -without needless constructors or initialization methods. +# Afero: The Universal Filesystem Abstraction for Go -Afero is also a library providing a base set of interoperable backend -filesystems that make it easy to work with, while retaining all the power -and benefit of the os and ioutil packages. +Afero is a powerful and extensible filesystem abstraction system for Go. It provides a single, unified API for interacting with diverse filesystems—including the local disk, memory, archives, and network storage. -Afero provides significant improvements over using the os package alone, most -notably the ability to create mock and testing filesystems without relying on the disk. +Afero acts as a drop-in replacement for the standard `os` package, enabling you to write modular code that is agnostic to the underlying storage, dramatically simplifies testing, and allows for sophisticated architectural patterns through filesystem composition. -It is suitable for use in any situation where you would consider using the OS -package as it provides an additional abstraction that makes it easy to use a -memory backed file system during testing. It also adds support for the http -filesystem for full interoperability. +## Why Afero? +Afero elevates filesystem interaction beyond simple file reading and writing, offering solutions for testability, flexibility, and advanced architecture. -## Afero Features +🔑 **Key Features:** -* A single consistent API for accessing a variety of filesystems -* Interoperation between a variety of file system types -* A set of interfaces to encourage and enforce interoperability between backends -* An atomic cross platform memory backed file system -* Support for compositional (union) file systems by combining multiple file systems acting as one -* Specialized backends which modify existing filesystems (Read Only, Regexp filtered) -* A set of utility functions ported from io, ioutil & hugo to be afero aware -* Wrapper for go 1.16 filesystem abstraction `io/fs.FS` +* **Universal API:** Write your code once. Run it against the local OS, in-memory storage, ZIP/TAR archives, or remote systems (SFTP, GCS). +* **Ultimate Testability:** Utilize `MemMapFs`, a fully concurrent-safe, read/write in-memory filesystem. Write fast, isolated, and reliable unit tests without touching the physical disk or worrying about cleanup. +* **Powerful Composition:** Afero's hidden superpower. Layer filesystems on top of each other to create sophisticated behaviors: + * **Sandboxing:** Use `CopyOnWriteFs` to create temporary scratch spaces that isolate changes from the base filesystem. + * **Caching:** Use `CacheOnReadFs` to automatically layer a fast cache (like memory) over a slow backend (like a network drive). 
+ * **Security Jails:** Use `BasePathFs` to restrict application access to a specific subdirectory (chroot). +* **`os` Package Compatibility:** Afero mirrors the functions in the standard `os` package, making adoption and refactoring seamless. +* **`io/fs` Compatibility:** Fully compatible with the Go standard library's `io/fs` interfaces. -# Using Afero +## Installation -Afero is easy to use and easier to adopt. - -A few different ways you could use Afero: - -* Use the interfaces alone to define your own file system. -* Wrapper for the OS packages. -* Define different filesystems for different parts of your application. -* Use Afero for mock filesystems while testing - -## Step 1: Install Afero - -First use go get to install the latest version of the library. - - $ go get github.com/spf13/afero +```bash +go get github.com/spf13/afero +``` -Next include Afero in your application. ```go import "github.com/spf13/afero" ``` -## Step 2: Declare a backend +## Quick Start: The Power of Abstraction + +The core of Afero is the `afero.Fs` interface. By designing your functions to accept this interface rather than calling `os.*` functions directly, your code instantly becomes more flexible and testable. + +### 1. Refactor Your Code + +Change functions that rely on the `os` package to accept `afero.Fs`. -First define a package variable and set it to a pointer to a filesystem. ```go -var AppFs = afero.NewMemMapFs() +// Before: Coupled to the OS and difficult to test +// func ProcessConfiguration(path string) error { +// data, err := os.ReadFile(path) +// ... +// } -or +import "github.com/spf13/afero" -var AppFs = afero.NewOsFs() +// After: Decoupled, flexible, and testable +func ProcessConfiguration(fs afero.Fs, path string) error { + // Use Afero utility functions which mirror os/ioutil + data, err := afero.ReadFile(fs, path) + // ... process the data + return err +} ``` -It is important to note that if you repeat the composite literal you -will be using a completely new and isolated filesystem. In the case of -OsFs it will still use the same underlying filesystem but will reduce -the ability to drop in other filesystems as desired. -## Step 3: Use it like you would the OS package +### 2. Usage in Production -Throughout your application use any function and method like you normally -would. +In your production environment, inject the `OsFs` backend, which wraps the standard operating system calls. -So if my application before had: -```go -os.Open("/tmp/foo") -``` -We would replace it with: ```go -AppFs.Open("/tmp/foo") +func main() { + // Use the real OS filesystem + AppFs := afero.NewOsFs() + ProcessConfiguration(AppFs, "/etc/myapp.conf") +} ``` -`AppFs` being the variable we defined above. +### 3. Usage in Testing +In your tests, inject `MemMapFs`. This provides a blazing-fast, isolated, in-memory filesystem that requires no disk I/O and no cleanup. 
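+The utility helpers used throughout these examples (`afero.WriteFile`, `afero.ReadFile`, and friends) can also be bound to a filesystem as methods via the `afero.Afero` wrapper type; a minimal sketch:
+
+```go
+AppFs := afero.NewMemMapFs()
+
+// Afero embeds an Fs and exposes the utility functions as methods.
+afs := &afero.Afero{Fs: AppFs}
+if err := afs.WriteFile("/test/config.json", []byte(`{"feature": true}`), 0644); err != nil {
+	// handle the write error
+}
+
+ok, _ := afs.Exists("/test/config.json") // ok == true
+```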
-## List of all available functions - -File System Methods Available: ```go -Chmod(name string, mode os.FileMode) : error -Chown(name string, uid, gid int) : error -Chtimes(name string, atime time.Time, mtime time.Time) : error -Create(name string) : File, error -Mkdir(name string, perm os.FileMode) : error -MkdirAll(path string, perm os.FileMode) : error -Name() : string -Open(name string) : File, error -OpenFile(name string, flag int, perm os.FileMode) : File, error -Remove(name string) : error -RemoveAll(path string) : error -Rename(oldname, newname string) : error -Stat(name string) : os.FileInfo, error -``` -File Interfaces and Methods Available: -```go -io.Closer -io.Reader -io.ReaderAt -io.Seeker -io.Writer -io.WriterAt - -Name() : string -Readdir(count int) : []os.FileInfo, error -Readdirnames(n int) : []string, error -Stat() : os.FileInfo, error -Sync() : error -Truncate(size int64) : error -WriteString(s string) : ret int, err error +func TestProcessConfiguration(t *testing.T) { + // Use the in-memory filesystem + AppFs := afero.NewMemMapFs() + + // Pre-populate the memory filesystem for the test + configPath := "/test/config.json" + afero.WriteFile(AppFs, configPath, []byte(`{"feature": true}`), 0644) + + // Run the test entirely in memory + err := ProcessConfiguration(AppFs, configPath) + if err != nil { + t.Fatal(err) + } +} ``` -In some applications it may make sense to define a new package that -simply exports the file system variable for easy access from anywhere. -## Using Afero's utility functions +## Afero's Superpower: Composition -Afero provides a set of functions to make it easier to use the underlying file systems. -These functions have been primarily ported from io & ioutil with some developed for Hugo. +Afero's most unique feature is its ability to combine filesystems. This allows you to build complex behaviors out of simple components, keeping your application logic clean. -The afero utilities support all afero compatible backends. +### Example 1: Sandboxing with Copy-on-Write -The list of utilities includes: +Create a temporary environment where an application can "modify" system files without affecting the actual disk. ```go -DirExists(path string) (bool, error) -Exists(path string) (bool, error) -FileContainsBytes(filename string, subslice []byte) (bool, error) -GetTempDir(subPath string) string -IsDir(path string) (bool, error) -IsEmpty(path string) (bool, error) -ReadDir(dirname string) ([]os.FileInfo, error) -ReadFile(filename string) ([]byte, error) -SafeWriteReader(path string, r io.Reader) (err error) -TempDir(dir, prefix string) (name string, err error) -TempFile(dir, prefix string) (f File, err error) -Walk(root string, walkFn filepath.WalkFunc) error -WriteFile(filename string, data []byte, perm os.FileMode) error -WriteReader(path string, r io.Reader) (err error) -``` -For a complete list see [Afero's GoDoc](https://godoc.org/github.com/spf13/afero) +// 1. The base layer is the real OS, made read-only for safety. +baseFs := afero.NewReadOnlyFs(afero.NewOsFs()) -They are available under two different approaches to use. You can either call -them directly where the first parameter of each function will be the file -system, or you can declare a new `Afero`, a custom type used to bind these -functions as methods to a given filesystem. +// 2. The overlay layer is a temporary in-memory filesystem for changes. +overlayFs := afero.NewMemMapFs() -### Calling utilities directly +// 3. Combine them. Reads fall through to the base; writes only hit the overlay. 
+sandboxFs := afero.NewCopyOnWriteFs(baseFs, overlayFs) -```go -fs := new(afero.MemMapFs) -f, err := afero.TempFile(fs,"", "ioutil-test") +// The application can now "modify" /etc/hosts, but the changes are isolated in memory. +afero.WriteFile(sandboxFs, "/etc/hosts", []byte("127.0.0.1 sandboxed-app"), 0644) +// The real /etc/hosts on disk is untouched. ``` -### Calling via Afero +### Example 2: Caching a Slow Filesystem -```go -fs := afero.NewMemMapFs() -afs := &afero.Afero{Fs: fs} -f, err := afs.TempFile("", "ioutil-test") -``` +Improve performance by layering a fast cache (like memory) over a slow backend (like a network drive or cloud storage). -## Using Afero for Testing +```go +import "time" -There is a large benefit to using a mock filesystem for testing. It has a -completely blank state every time it is initialized and can be easily -reproducible regardless of OS. You could create files to your heart’s content -and the file access would be fast while also saving you from all the annoying -issues with deleting temporary files, Windows file locking, etc. The MemMapFs -backend is perfect for testing. +// Assume 'remoteFs' is a slow backend (e.g., SFTP or GCS) +var remoteFs afero.Fs -* Much faster than performing I/O operations on disk -* Avoid security issues and permissions -* Far more control. 'rm -rf /' with confidence -* Test setup is far more easier to do -* No test cleanup needed +// 'cacheFs' is a fast in-memory backend +cacheFs := afero.NewMemMapFs() -One way to accomplish this is to define a variable as mentioned above. -In your application this will be set to afero.NewOsFs() during testing you -can set it to afero.NewMemMapFs(). +// Create the caching layer. Cache items for 5 minutes upon first read. +cachedFs := afero.NewCacheOnReadFs(remoteFs, cacheFs, 5*time.Minute) -It wouldn't be uncommon to have each test initialize a blank slate memory -backend. To do this I would define my `appFS = afero.NewOsFs()` somewhere -appropriate in my application code. This approach ensures that Tests are order -independent, with no test relying on the state left by an earlier test. +// The first read is slow (fetches from remote, then caches) +data1, _ := afero.ReadFile(cachedFs, "data.json") -Then in my tests I would initialize a new MemMapFs for each test: -```go -func TestExist(t *testing.T) { - appFS := afero.NewMemMapFs() - // create test files and directories - appFS.MkdirAll("src/a", 0755) - afero.WriteFile(appFS, "src/a/b", []byte("file b"), 0644) - afero.WriteFile(appFS, "src/c", []byte("file c"), 0644) - name := "src/c" - _, err := appFS.Stat(name) - if os.IsNotExist(err) { - t.Errorf("file \"%s\" does not exist.\n", name) - } -} +// The second read is instant (serves from memory cache) +data2, _ := afero.ReadFile(cachedFs, "data.json") ``` -# Available Backends +### Example 3: Security Jails (chroot) + +Restrict an application component's access to a specific subdirectory. -## Operating System Native +```go +osFs := afero.NewOsFs() -### OsFs +// Create a filesystem rooted at /home/user/public +// The application cannot access anything above this directory. +jailedFs := afero.NewBasePathFs(osFs, "/home/user/public") -The first is simply a wrapper around the native OS calls. This makes it -very easy to use as all of the calls are the same as the existing OS -calls. It also makes it trivial to have your code use the OS during -operation and a mock filesystem during testing or as needed. 
+// To the application, this is reading "/" +// In reality, it's reading "/home/user/public/" +dirInfo, err := afero.ReadDir(jailedFs, "/") -```go -appfs := afero.NewOsFs() -appfs.MkdirAll("src/a", 0755) +// Attempts to access parent directories fail +_, err = jailedFs.Open("../secrets.txt") // Returns an error ``` -## Memory Backed Storage +## Real-World Use Cases -### MemMapFs +### Build Cloud-Agnostic Applications -Afero also provides a fully atomic memory backed filesystem perfect for use in -mocking and to speed up unnecessary disk io when persistence isn’t -necessary. It is fully concurrent and will work within go routines -safely. +Write applications that seamlessly work with different storage backends: ```go -mm := afero.NewMemMapFs() -mm.MkdirAll("src/a", 0755) -``` +type DocumentProcessor struct { + fs afero.Fs +} + +func NewDocumentProcessor(fs afero.Fs) *DocumentProcessor { + return &DocumentProcessor{fs: fs} +} -#### InMemoryFile +func (p *DocumentProcessor) Process(inputPath, outputPath string) error { + // This code works whether fs is local disk, cloud storage, or memory + content, err := afero.ReadFile(p.fs, inputPath) + if err != nil { + return err + } + + processed := processContent(content) + return afero.WriteFile(p.fs, outputPath, processed, 0644) +} -As part of MemMapFs, Afero also provides an atomic, fully concurrent memory -backed file implementation. This can be used in other memory backed file -systems with ease. Plans are to add a radix tree memory stored file -system using InMemoryFile. +// Use with local filesystem +processor := NewDocumentProcessor(afero.NewOsFs()) -## Network Interfaces +// Use with Google Cloud Storage +processor := NewDocumentProcessor(gcsFS) -### SftpFs +// Use with in-memory filesystem for testing +processor := NewDocumentProcessor(afero.NewMemMapFs()) +``` -Afero has experimental support for secure file transfer protocol (sftp). Which can -be used to perform file operations over a encrypted channel. +### Treating Archives as Filesystems -### GCSFs +Read files directly from `.zip` or `.tar` archives without unpacking them to disk first. -Afero has experimental support for Google Cloud Storage (GCS). You can either set the -`GOOGLE_APPLICATION_CREDENTIALS_JSON` env variable to your JSON credentials or use `opts` in -`NewGcsFS` to configure access to your GCS bucket. +```go +import ( + "archive/zip" + "github.com/spf13/afero/zipfs" +) -Some known limitations of the existing implementation: -* No Chmod support - The GCS ACL could probably be mapped to *nix style permissions but that would add another level of complexity and is ignored in this version. -* No Chtimes support - Could be simulated with attributes (gcs a/m-times are set implicitly) but that's is left for another version. -* Not thread safe - Also assumes all file operations are done through the same instance of the GcsFs. File operations between different GcsFs instances are not guaranteed to be consistent. +// Assume 'zipReader' is a *zip.Reader initialized from a file or memory +var zipReader *zip.Reader +// Create a read-only ZipFs +archiveFS := zipfs.New(zipReader) -## Filtering Backends +// Read a file from within the archive using the standard Afero API +content, err := afero.ReadFile(archiveFS, "/docs/readme.md") +``` -### BasePathFs +### Serving Any Filesystem over HTTP -The BasePathFs restricts all operations to a given path within an Fs. -The given file name to the operations on this Fs will be prepended with -the base path before calling the source Fs. 
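+For completeness, the `*zip.Reader` assumed in the archive example above can come straight from the standard library; a minimal sketch, with `site.zip` standing in for any archive path:
+
+```go
+import (
+	"archive/zip"
+
+	"github.com/spf13/afero/zipfs"
+)
+
+// zip.OpenReader returns a *zip.ReadCloser whose embedded zip.Reader
+// can back a read-only Afero filesystem.
+rc, err := zip.OpenReader("site.zip")
+if err != nil {
+	panic(err)
+}
+defer rc.Close()
+
+archiveFS := zipfs.New(&rc.Reader)
+content, _ := afero.ReadFile(archiveFS, "/docs/readme.md")
+```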
+Use `HttpFs` to expose any Afero filesystem—even one created dynamically in memory—through a standard Go web server. ```go -bp := afero.NewBasePathFs(afero.NewOsFs(), "/base/path") -``` +import ( + "net/http" + "github.com/spf13/afero" +) -### ReadOnlyFs +func main() { + memFS := afero.NewMemMapFs() + afero.WriteFile(memFS, "index.html", []byte("
<h1>Hello from Memory!</h1>
"), 0644) -A thin wrapper around the source Fs providing a read only view. + // Wrap the memory filesystem to make it compatible with http.FileServer. + httpFS := afero.NewHttpFs(memFS) -```go -fs := afero.NewReadOnlyFs(afero.NewOsFs()) -_, err := fs.Create("/file.txt") -// err = syscall.EPERM + http.Handle("/", http.FileServer(httpFS.Dir("/"))) + http.ListenAndServe(":8080", nil) +} ``` -# RegexpFs +### Testing Made Simple -A filtered view on file names, any file NOT matching -the passed regexp will be treated as non-existing. -Files not matching the regexp provided will not be created. -Directories are not filtered. +One of Afero's greatest strengths is making filesystem-dependent code easily testable: ```go -fs := afero.NewRegexpFs(afero.NewMemMapFs(), regexp.MustCompile(`\.txt$`)) -_, err := fs.Create("/file.html") -// err = syscall.ENOENT -``` +func SaveUserData(fs afero.Fs, userID string, data []byte) error { + filename := fmt.Sprintf("users/%s.json", userID) + return afero.WriteFile(fs, filename, data, 0644) +} -### HttpFs +func TestSaveUserData(t *testing.T) { + // Create a clean, fast, in-memory filesystem for testing + testFS := afero.NewMemMapFs() + + userData := []byte(`{"name": "John", "email": "john@example.com"}`) + err := SaveUserData(testFS, "123", userData) + + if err != nil { + t.Fatalf("SaveUserData failed: %v", err) + } + + // Verify the file was saved correctly + saved, err := afero.ReadFile(testFS, "users/123.json") + if err != nil { + t.Fatalf("Failed to read saved file: %v", err) + } + + if string(saved) != string(userData) { + t.Errorf("Data mismatch: got %s, want %s", saved, userData) + } +} +``` -Afero provides an http compatible backend which can wrap any of the existing -backends. +**Benefits of testing with Afero:** +- ⚡ **Fast** - No disk I/O, tests run in memory +- 🔄 **Reliable** - Each test starts with a clean slate +- 🧹 **No cleanup** - Memory is automatically freed +- 🔒 **Safe** - Can't accidentally modify real files +- 🏃 **Parallel** - Tests can run concurrently without conflicts + +## Backend Reference + +| Type | Backend | Constructor | Description | Status | +| :--- | :--- | :--- | :--- | :--- | +| **Core** | **OsFs** | `afero.NewOsFs()` | Interacts with the real operating system filesystem. Use in production. | ✅ Official | +| | **MemMapFs** | `afero.NewMemMapFs()` | A fast, atomic, concurrent-safe, in-memory filesystem. Ideal for testing. | ✅ Official | +| **Composition** | **CopyOnWriteFs**| `afero.NewCopyOnWriteFs(base, overlay)` | A read-only base with a writable overlay. Ideal for sandboxing. | ✅ Official | +| | **CacheOnReadFs**| `afero.NewCacheOnReadFs(base, cache, ttl)` | Lazily caches files from a slow base into a fast layer on first read. | ✅ Official | +| | **BasePathFs** | `afero.NewBasePathFs(source, path)` | Restricts operations to a subdirectory (chroot/jail). | ✅ Official | +| | **ReadOnlyFs** | `afero.NewReadOnlyFs(source)` | Provides a read-only view, preventing any modifications. | ✅ Official | +| | **RegexpFs** | `afero.NewRegexpFs(source, regexp)` | Filters a filesystem, only showing files that match a regex. | ✅ Official | +| **Utility** | **HttpFs** | `afero.NewHttpFs(source)` | Wraps any Afero filesystem to be served via `http.FileServer`. | ✅ Official | +| **Archives** | **ZipFs** | `zipfs.New(zipReader)` | Read-only access to files within a ZIP archive. | ✅ Official | +| | **TarFs** | `tarfs.New(tarReader)` | Read-only access to files within a TAR archive. 
| ✅ Official | +| **Network** | **GcsFs** | `gcsfs.NewGcsFs(...)` | Google Cloud Storage backend. | ⚡ Experimental | +| | **SftpFs** | `sftpfs.New(...)` | SFTP backend. | ⚡ Experimental | +| **3rd Party Cloud** | **S3Fs** | [`fclairamb/afero-s3`](https://github.com/fclairamb/afero-s3) | Production-ready S3 backend built on official AWS SDK. | 🔹 3rd Party | +| | **MinioFs** | [`cpyun/afero-minio`](https://github.com/cpyun/afero-minio) | MinIO object storage backend with S3 compatibility. | 🔹 3rd Party | +| | **DriveFs** | [`fclairamb/afero-gdrive`](https://github.com/fclairamb/afero-gdrive) | Google Drive backend with streaming support. | 🔹 3rd Party | +| | **DropboxFs** | [`fclairamb/afero-dropbox`](https://github.com/fclairamb/afero-dropbox) | Dropbox backend with streaming support. | 🔹 3rd Party | +| **3rd Party Specialized** | **GitFs** | [`tobiash/go-gitfs`](https://github.com/tobiash/go-gitfs) | Git repository filesystem (read-only, Afero compatible). | 🔹 3rd Party | +| | **DockerFs** | [`unmango/aferox`](https://github.com/unmango/aferox) | Docker container filesystem access. | 🔹 3rd Party | +| | **GitHubFs** | [`unmango/aferox`](https://github.com/unmango/aferox) | GitHub repository and releases filesystem. | 🔹 3rd Party | +| | **FilterFs** | [`unmango/aferox`](https://github.com/unmango/aferox) | Filesystem filtering with predicates. | 🔹 3rd Party | +| | **IgnoreFs** | [`unmango/aferox`](https://github.com/unmango/aferox) | .gitignore-aware filtering filesystem. | 🔹 3rd Party | +| | **FUSEFs** | [`JakWai01/sile-fystem`](https://github.com/JakWai01/sile-fystem) | Generic FUSE implementation using any Afero backend. | 🔹 3rd Party | + +## Afero vs. `io/fs` (Go 1.16+) + +Go 1.16 introduced the `io/fs` package, which provides a standard abstraction for **read-only** filesystems. + +Afero complements `io/fs` by focusing on different needs: + +* **Use `io/fs` when:** You only need to read files and want to conform strictly to the standard library interfaces. +* **Use Afero when:** + * Your application needs to **create, write, modify, or delete** files. + * You need to test complex read/write interactions (e.g., renaming, concurrent writes). + * You need advanced compositional features (Copy-on-Write, Caching, etc.). + +Afero is fully compatible with `io/fs`. You can wrap any Afero filesystem to satisfy the `fs.FS` interface using `afero.NewIOFS`: -The Http package requires a slightly specific version of Open which -returns an http.File type. +```go +import "io/fs" -Afero provides an httpFs file system which satisfies this requirement. -Any Afero FileSystem can be used as an httpFs. +// Create an Afero filesystem (writable) +var myAferoFs afero.Fs = afero.NewMemMapFs() -```go -httpFs := afero.NewHttpFs() -fileserver := http.FileServer(httpFs.Dir()) -http.Handle("/", fileserver) +// Convert it to a standard library fs.FS (read-only view) +var myIoFs fs.FS = afero.NewIOFS(myAferoFs) ``` -## Composite Backends +## Third-Party Backends & Ecosystem -Afero provides the ability have two filesystems (or more) act as a single -file system. +The Afero community has developed numerous backends and tools that extend the library's capabilities. Below are curated, well-maintained options organized by maturity and reliability. -### CacheOnReadFs +### Featured Community Backends -The CacheOnReadFs will lazily make copies of any accessed files from the base -layer into the overlay. 
Subsequent reads will be pulled from the overlay -directly permitting the request is within the cache duration of when it was -created in the overlay. +These are mature, reliable backends that we can confidently recommend for production use: -If the base filesystem is writeable, any changes to files will be -done first to the base, then to the overlay layer. Write calls to open file -handles like `Write()` or `Truncate()` to the overlay first. +#### **Amazon S3** - [`fclairamb/afero-s3`](https://github.com/fclairamb/afero-s3) +Production-ready S3 backend built on the official AWS SDK for Go. -To writing files to the overlay only, you can use the overlay Fs directly (not -via the union Fs). +```go +import "github.com/fclairamb/afero-s3" -Cache files in the layer for the given time.Duration, a cache duration of 0 -means "forever" meaning the file will not be re-requested from the base ever. +s3fs := s3.NewFs(bucket, session) +``` -A read-only base will make the overlay also read-only but still copy files -from the base to the overlay when they're not present (or outdated) in the -caching layer. +#### **MinIO** - [`cpyun/afero-minio`](https://github.com/cpyun/afero-minio) +MinIO object storage backend providing S3-compatible object storage with deduplication and optimization features. ```go -base := afero.NewOsFs() -layer := afero.NewMemMapFs() -ufs := afero.NewCacheOnReadFs(base, layer, 100 * time.Second) +import "github.com/cpyun/afero-minio" + +minioFs := miniofs.NewMinioFs(ctx, "minio://endpoint/bucket") ``` -### CopyOnWriteFs() +### Community & Specialized Backends -The CopyOnWriteFs is a read only base file system with a potentially -writeable layer on top. +#### Cloud Storage -Read operations will first look in the overlay and if not found there, will -serve the file from the base. +- **Google Drive** - [`fclairamb/afero-gdrive`](https://github.com/fclairamb/afero-gdrive) + Streaming support; no write-seeking or POSIX permissions; no files listing cache -Changes to the file system will only be made in the overlay. +- **Dropbox** - [`fclairamb/afero-dropbox`](https://github.com/fclairamb/afero-dropbox) + Streaming support; no write-seeking or POSIX permissions -Any attempt to modify a file found only in the base will copy the file to the -overlay layer before modification (including opening a file with a writable -handle). +#### Version Control Systems -Removing and Renaming files present only in the base layer is not currently -permitted. If a file is present in the base layer and the overlay, only the -overlay will be removed/renamed. +- **Git Repositories** - [`tobiash/go-gitfs`](https://github.com/tobiash/go-gitfs) + Read-only filesystem abstraction for Git repositories. Works with bare repositories and provides filesystem view of any git reference. Uses go-git for repository access. -```go - base := afero.NewOsFs() - roBase := afero.NewReadOnlyFs(base) - ufs := afero.NewCopyOnWriteFs(roBase, afero.NewMemMapFs()) +#### Container and Remote Systems - fh, _ = ufs.Create("/home/test/file2.txt") - fh.WriteString("This is a test") - fh.Close() -``` +- **Docker Containers** - [`unmango/aferox`](https://github.com/unmango/aferox) + Access Docker container filesystems as if they were local filesystems + +- **GitHub API** - [`unmango/aferox`](https://github.com/unmango/aferox) + Turn GitHub repositories, releases, and assets into browsable filesystems -In this example all write operations will only occur in memory (MemMapFs) -leaving the base filesystem (OsFs) untouched. 
+#### FUSE Integration +- **Generic FUSE** - [`JakWai01/sile-fystem`](https://github.com/JakWai01/sile-fystem) + Mount any Afero filesystem as a FUSE filesystem, allowing any Afero backend to be used as a real mounted filesystem -## Desired/possible backends +#### Specialized Filesystems -The following is a short list of possible backends we hope someone will -implement: +- **FAT32 Support** - [`aligator/GoFAT`](https://github.com/aligator/GoFAT) + Pure Go FAT filesystem implementation (currently read-only) -* SSH -* S3 +### Interface Adapters & Utilities -# About the project +**Cross-Interface Compatibility:** +- [`jfontan/go-billy-desfacer`](https://github.com/jfontan/go-billy-desfacer) - Adapter between Afero and go-billy interfaces (for go-git compatibility) +- [`Maldris/go-billy-afero`](https://github.com/Maldris/go-billy-afero) - Alternative wrapper for using Afero with go-billy +- [`c4milo/afero2billy`](https://github.com/c4milo/afero2billy) - Another Afero to billy filesystem adapter -## What's in the name +**Working Directory Management:** +- [`carolynvs/aferox`](https://github.com/carolynvs/aferox) - Working directory-aware filesystem wrapper -Afero comes from the latin roots Ad-Facere. +**Advanced Filtering:** +- [`unmango/aferox`](https://github.com/unmango/aferox) includes multiple specialized filesystems: + - **FilterFs** - Predicate-based file filtering + - **IgnoreFs** - .gitignore-aware filtering + - **WriterFs** - Dump writes to io.Writer for debugging -**"Ad"** is a prefix meaning "to". +#### Developer Tools & Utilities -**"Facere"** is a form of the root "faciō" making "make or do". +**nhatthm Utility Suite** - Essential tools for Afero development: +- [`nhatthm/aferocopy`](https://github.com/nhatthm/aferocopy) - Copy files between any Afero filesystems +- [`nhatthm/aferomock`](https://github.com/nhatthm/aferomock) - Mocking toolkit for testing +- [`nhatthm/aferoassert`](https://github.com/nhatthm/aferoassert) - Assertion helpers for filesystem testing -The literal meaning of afero is "to make" or "to do" which seems very fitting -for a library that allows one to make files and directories and do things with them. +### Ecosystem Showcase -The English word that shares the same roots as Afero is "affair". Affair shares -the same concept but as a noun it means "something that is made or done" or "an -object of a particular type". +**Windows Virtual Drives** - [`balazsgrill/potatodrive`](https://github.com/balazsgrill/potatodrive) +Mount any Afero filesystem as a Windows drive letter. Brilliant demonstration of Afero's power! -It's also nice that unlike some of my other libraries (hugo, cobra, viper) it -Googles very well. +### Modern Asset Embedding (Go 1.16+) -## Release Notes +Instead of third-party tools, use Go's native `//go:embed` with Afero: -See the [Releases Page](https://github.com/spf13/afero/releases). +```go +import ( + "embed" + "github.com/spf13/afero" +) + +//go:embed assets/* +var assetsFS embed.FS + +func main() { + // Convert embedded files to Afero filesystem + fs := afero.FromIOFS(assetsFS) + + // Use like any other Afero filesystem + content, _ := afero.ReadFile(fs, "assets/config.json") +} +``` ## Contributing -1. Fork it +We welcome contributions! The project is mature, but we are actively looking for contributors to help implement and stabilize network/cloud backends. 
+ +* 🔥 **Microsoft Azure Blob Storage** +* 🔒 **Modern Encryption Backend** - Built on secure, contemporary crypto (not legacy EncFS) +* 🐙 **Canonical go-git Adapter** - Unified solution for Git integration +* 📡 **SSH/SCP Backend** - Secure remote file operations +* Stabilization of existing experimental backends (GCS, SFTP) + +To contribute: +1. Fork the repository 2. Create your feature branch (`git checkout -b my-new-feature`) 3. Commit your changes (`git commit -am 'Add some feature'`) 4. Push to the branch (`git push origin my-new-feature`) -5. Create new Pull Request - -## Releasing - -As of version 1.14.0, Afero moved implementations with third-party libraries to -their own submodules. - -Releasing a new version now requires a few steps: - -``` -VERSION=X.Y.Z -git tag -a v$VERSION -m "Release $VERSION" -git push origin v$VERSION - -cd gcsfs -go get github.com/spf13/afero@v$VERSION -go mod tidy -git commit -am "Update afero to v$VERSION" -git tag -a gcsfs/v$VERSION -m "Release gcsfs $VERSION" -git push origin gcsfs/v$VERSION -cd .. - -cd sftpfs -go get github.com/spf13/afero@v$VERSION -go mod tidy -git commit -am "Update afero to v$VERSION" -git tag -a sftpfs/v$VERSION -m "Release sftpfs $VERSION" -git push origin sftpfs/v$VERSION -cd .. - -git push -``` +5. Create a new Pull Request -TODO: move these instructions to a Makefile or something +## 📄 License -## Contributors +Afero is released under the Apache 2.0 license. See [LICENSE.txt](https://github.com/spf13/afero/blob/master/LICENSE.txt) for details. -Names in no particular order: +## 🔗 Additional Resources -* [spf13](https://github.com/spf13) -* [jaqx0r](https://github.com/jaqx0r) -* [mbertschler](https://github.com/mbertschler) -* [xor-gate](https://github.com/xor-gate) +- [📖 Full API Documentation](https://pkg.go.dev/github.com/spf13/afero) +- [🎯 Examples Repository](https://github.com/spf13/afero/tree/master/examples) +- [📋 Release Notes](https://github.com/spf13/afero/releases) +- [❓ GitHub Discussions](https://github.com/spf13/afero/discussions) -## License +--- -Afero is released under the Apache 2.0 license. See -[LICENSE.txt](https://github.com/spf13/afero/blob/master/LICENSE.txt) +*Afero comes from the Latin roots Ad-Facere, meaning "to make" or "to do" - fitting for a library that empowers you to make and do amazing things with filesystems.* diff --git a/vendor/github.com/spf13/afero/copyOnWriteFs.go b/vendor/github.com/spf13/afero/copyOnWriteFs.go index 184d6dd702..aba2879ebb 100644 --- a/vendor/github.com/spf13/afero/copyOnWriteFs.go +++ b/vendor/github.com/spf13/afero/copyOnWriteFs.go @@ -34,7 +34,8 @@ func (u *CopyOnWriteFs) isBaseFile(name string) (bool, error) { _, err := u.base.Stat(name) if err != nil { if oerr, ok := err.(*os.PathError); ok { - if oerr.Err == os.ErrNotExist || oerr.Err == syscall.ENOENT || oerr.Err == syscall.ENOTDIR { + if oerr.Err == os.ErrNotExist || oerr.Err == syscall.ENOENT || + oerr.Err == syscall.ENOTDIR { return false, nil } } @@ -237,7 +238,11 @@ func (u *CopyOnWriteFs) OpenFile(name string, flag int, perm os.FileMode) (File, return u.layer.OpenFile(name, flag, perm) } - return nil, &os.PathError{Op: "open", Path: name, Err: syscall.ENOTDIR} // ...or os.ErrNotExist? + return nil, &os.PathError{ + Op: "open", + Path: name, + Err: syscall.ENOTDIR, + } // ...or os.ErrNotExist? 
} if b { return u.base.OpenFile(name, flag, perm) diff --git a/vendor/github.com/spf13/afero/iofs.go b/vendor/github.com/spf13/afero/iofs.go index b13155ca4a..57ba5673ec 100644 --- a/vendor/github.com/spf13/afero/iofs.go +++ b/vendor/github.com/spf13/afero/iofs.go @@ -137,7 +137,7 @@ type readDirFile struct { var _ fs.ReadDirFile = readDirFile{} func (r readDirFile) ReadDir(n int) ([]fs.DirEntry, error) { - items, err := r.File.Readdir(n) + items, err := r.Readdir(n) if err != nil { return nil, err } @@ -161,7 +161,12 @@ var _ Fs = FromIOFS{} func (f FromIOFS) Create(name string) (File, error) { return nil, notImplemented("create", name) } -func (f FromIOFS) Mkdir(name string, perm os.FileMode) error { return notImplemented("mkdir", name) } +func (f FromIOFS) Mkdir( + name string, + perm os.FileMode, +) error { + return notImplemented("mkdir", name) +} func (f FromIOFS) MkdirAll(path string, perm os.FileMode) error { return notImplemented("mkdirall", path) diff --git a/vendor/github.com/spf13/afero/lstater.go b/vendor/github.com/spf13/afero/lstater.go index 89c1bfc0a7..2dcbdb1f09 100644 --- a/vendor/github.com/spf13/afero/lstater.go +++ b/vendor/github.com/spf13/afero/lstater.go @@ -19,9 +19,9 @@ import ( // Lstater is an optional interface in Afero. It is only implemented by the // filesystems saying so. -// It will call Lstat if the filesystem iself is, or it delegates to, the os filesystem. +// It will call Lstat if the filesystem itself is, or it delegates to, the os filesystem. // Else it will call Stat. -// In addtion to the FileInfo, it will return a boolean telling whether Lstat was called or not. +// In addition to the FileInfo, it will return a boolean telling whether Lstat was called or not. type Lstater interface { LstatIfPossible(name string) (os.FileInfo, bool, error) } diff --git a/vendor/github.com/spf13/afero/mem/file.go b/vendor/github.com/spf13/afero/mem/file.go index 62fe4498e1..c77fcd40e9 100644 --- a/vendor/github.com/spf13/afero/mem/file.go +++ b/vendor/github.com/spf13/afero/mem/file.go @@ -150,7 +150,11 @@ func (f *File) Sync() error { func (f *File) Readdir(count int) (res []os.FileInfo, err error) { if !f.fileData.dir { - return nil, &os.PathError{Op: "readdir", Path: f.fileData.name, Err: errors.New("not a dir")} + return nil, &os.PathError{ + Op: "readdir", + Path: f.fileData.name, + Err: errors.New("not a dir"), + } } var outLength int64 @@ -236,7 +240,11 @@ func (f *File) Truncate(size int64) error { return ErrFileClosed } if f.readOnly { - return &os.PathError{Op: "truncate", Path: f.fileData.name, Err: errors.New("file handle is read only")} + return &os.PathError{ + Op: "truncate", + Path: f.fileData.name, + Err: errors.New("file handle is read only"), + } } if size < 0 { return ErrOutOfRange @@ -273,7 +281,11 @@ func (f *File) Write(b []byte) (n int, err error) { return 0, ErrFileClosed } if f.readOnly { - return 0, &os.PathError{Op: "write", Path: f.fileData.name, Err: errors.New("file handle is read only")} + return 0, &os.PathError{ + Op: "write", + Path: f.fileData.name, + Err: errors.New("file handle is read only"), + } } n = len(b) cur := atomic.LoadInt64(&f.at) @@ -285,7 +297,9 @@ func (f *File) Write(b []byte) (n int, err error) { tail = f.fileData.data[n+int(cur):] } if diff > 0 { - f.fileData.data = append(f.fileData.data, append(bytes.Repeat([]byte{0o0}, int(diff)), b...)...) + f.fileData.data = append( + f.fileData.data, + append(bytes.Repeat([]byte{0o0}, int(diff)), b...)...) f.fileData.data = append(f.fileData.data, tail...) 
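		// Note: a write that begins past the current end of the data
		// zero-fills the gap first (the bytes.Repeat above), mirroring
		// how an *os.File behaves after seeking beyond EOF.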
} else { f.fileData.data = append(f.fileData.data[:cur], b...) diff --git a/vendor/github.com/spf13/afero/unionFile.go b/vendor/github.com/spf13/afero/unionFile.go index 62dd6c93c8..2e2253f55c 100644 --- a/vendor/github.com/spf13/afero/unionFile.go +++ b/vendor/github.com/spf13/afero/unionFile.go @@ -92,7 +92,8 @@ func (f *UnionFile) Seek(o int64, w int) (pos int64, err error) { func (f *UnionFile) Write(s []byte) (n int, err error) { if f.Layer != nil { n, err = f.Layer.Write(s) - if err == nil && f.Base != nil { // hmm, do we have fixed size files where a write may hit the EOF mark? + if err == nil && + f.Base != nil { // hmm, do we have fixed size files where a write may hit the EOF mark? _, err = f.Base.Write(s) } return n, err @@ -157,7 +158,7 @@ var defaultUnionMergeDirsFn = func(lofi, bofi []os.FileInfo) ([]os.FileInfo, err // return a single view of the overlayed directories. // At the end of the directory view, the error is io.EOF if c > 0. func (f *UnionFile) Readdir(c int) (ofi []os.FileInfo, err error) { - var merge DirsMerger = f.Merger + merge := f.Merger if merge == nil { merge = defaultUnionMergeDirsFn } diff --git a/vendor/github.com/spf13/afero/util.go b/vendor/github.com/spf13/afero/util.go index 9e4cba2746..2317688388 100644 --- a/vendor/github.com/spf13/afero/util.go +++ b/vendor/github.com/spf13/afero/util.go @@ -113,11 +113,11 @@ func GetTempDir(fs Fs, subPath string) string { if subPath != "" { // preserve windows backslash :-( if FilePathSeparator == "\\" { - subPath = strings.Replace(subPath, "\\", "____", -1) + subPath = strings.ReplaceAll(subPath, "\\", "____") } dir = dir + UnicodeSanitize((subPath)) if FilePathSeparator == "\\" { - dir = strings.Replace(dir, "____", "\\", -1) + dir = strings.ReplaceAll(dir, "____", "\\") } if exists, _ := Exists(fs, dir); exists { diff --git a/vendor/github.com/spf13/cobra/.golangci.yml b/vendor/github.com/spf13/cobra/.golangci.yml index 6acf8ab1ea..104dc24407 100644 --- a/vendor/github.com/spf13/cobra/.golangci.yml +++ b/vendor/github.com/spf13/cobra/.golangci.yml @@ -57,3 +57,10 @@ linters: - common-false-positives - legacy - std-error-handling + settings: + govet: + # Disable buildtag check to allow dual build tag syntax (both //go:build and // +build). + # This is necessary for Go 1.15 compatibility since //go:build was introduced in Go 1.17. + # This can be removed once Cobra requires Go 1.17 or higher. + disable: + - buildtag diff --git a/vendor/github.com/spf13/cobra/command.go b/vendor/github.com/spf13/cobra/command.go index 78088db69c..c05fed45ae 100644 --- a/vendor/github.com/spf13/cobra/command.go +++ b/vendor/github.com/spf13/cobra/command.go @@ -557,7 +557,7 @@ func (c *Command) FlagErrorFunc() (f func(*Command, error) error) { } } -var minUsagePadding = 25 +const minUsagePadding = 25 // UsagePadding return padding for the usage. func (c *Command) UsagePadding() int { @@ -567,7 +567,7 @@ func (c *Command) UsagePadding() int { return c.parent.commandsMaxUseLen } -var minCommandPathPadding = 11 +const minCommandPathPadding = 11 // CommandPathPadding return padding for the command path. func (c *Command) CommandPathPadding() int { @@ -577,7 +577,7 @@ func (c *Command) CommandPathPadding() int { return c.parent.commandsMaxCommandPathLen } -var minNamePadding = 11 +const minNamePadding = 11 // NamePadding returns padding for the name. 
func (c *Command) NamePadding() int { @@ -1939,7 +1939,7 @@ type tmplFunc struct { fn func(io.Writer, interface{}) error } -var defaultUsageTemplate = `Usage:{{if .Runnable}} +const defaultUsageTemplate = `Usage:{{if .Runnable}} {{.UseLine}}{{end}}{{if .HasAvailableSubCommands}} {{.CommandPath}} [command]{{end}}{{if gt (len .Aliases) 0}} @@ -2039,7 +2039,7 @@ func defaultUsageFunc(w io.Writer, in interface{}) error { return nil } -var defaultHelpTemplate = `{{with (or .Long .Short)}}{{. | trimTrailingWhitespaces}} +const defaultHelpTemplate = `{{with (or .Long .Short)}}{{. | trimTrailingWhitespaces}} {{end}}{{if or .Runnable .HasSubCommands}}{{.UsageString}}{{end}}` @@ -2061,7 +2061,7 @@ func defaultHelpFunc(w io.Writer, in interface{}) error { return nil } -var defaultVersionTemplate = `{{with .DisplayName}}{{printf "%s " .}}{{end}}{{printf "version %s" .Version}} +const defaultVersionTemplate = `{{with .DisplayName}}{{printf "%s " .}}{{end}}{{printf "version %s" .Version}} ` // defaultVersionFunc is equivalent to executing defaultVersionTemplate. The two should be changed in sync. diff --git a/vendor/github.com/stbenjam/no-sprintf-host-port/pkg/analyzer/analyzer.go b/vendor/github.com/stbenjam/no-sprintf-host-port/pkg/analyzer/analyzer.go index ce57a4c210..3110794e77 100644 --- a/vendor/github.com/stbenjam/no-sprintf-host-port/pkg/analyzer/analyzer.go +++ b/vendor/github.com/stbenjam/no-sprintf-host-port/pkg/analyzer/analyzer.go @@ -5,6 +5,7 @@ import ( "go/ast" "go/token" "regexp" + "strings" "golang.org/x/tools/go/analysis/passes/inspect" "golang.org/x/tools/go/ast/inspector" @@ -86,10 +87,20 @@ func checkForHostPortConstruction(sprintf *ast.CallExpr) error { regexp.MustCompile(`^[a-zA-Z][a-zA-Z0-9+-.]*://[^/]*@%s:.*$`), // URL with basic auth } - for _, re := range regexes { - if re.MatchString(fs) { - return fmt.Errorf("host:port in url should be constructed with net.JoinHostPort and not directly with fmt.Sprintf") + for idx, re := range regexes { + if !re.MatchString(fs) { + continue } + + // Match without basic auth and only handle cases where the hostname and optionally the port are specified. + if idx == 0 && len(sprintf.Args) <= 3 { + arg, ok := getStringLiteral(sprintf.Args, 1) + if ok && !strings.Contains(arg, ":") { + continue + } + } + + return fmt.Errorf("host:port in url should be constructed with net.JoinHostPort and not directly with fmt.Sprintf") } return nil diff --git a/vendor/go.yaml.in/yaml/v3/LICENSE b/vendor/go.yaml.in/yaml/v3/LICENSE new file mode 100644 index 0000000000..2683e4bb1f --- /dev/null +++ b/vendor/go.yaml.in/yaml/v3/LICENSE @@ -0,0 +1,50 @@ + +This project is covered by two different licenses: MIT and Apache. 
+ +#### MIT License #### + +The following files were ported to Go from C files of libyaml, and thus +are still covered by their original MIT license, with the additional +copyright staring in 2011 when the project was ported over: + + apic.go emitterc.go parserc.go readerc.go scannerc.go + writerc.go yamlh.go yamlprivateh.go + +Copyright (c) 2006-2010 Kirill Simonov +Copyright (c) 2006-2011 Kirill Simonov + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +### Apache License ### + +All the remaining project files are covered by the Apache license: + +Copyright (c) 2011-2019 Canonical Ltd + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/vendor/go.yaml.in/yaml/v3/NOTICE b/vendor/go.yaml.in/yaml/v3/NOTICE new file mode 100644 index 0000000000..866d74a7ad --- /dev/null +++ b/vendor/go.yaml.in/yaml/v3/NOTICE @@ -0,0 +1,13 @@ +Copyright 2011-2016 Canonical Ltd. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/vendor/go.yaml.in/yaml/v3/README.md b/vendor/go.yaml.in/yaml/v3/README.md new file mode 100644 index 0000000000..15a85a6350 --- /dev/null +++ b/vendor/go.yaml.in/yaml/v3/README.md @@ -0,0 +1,171 @@ +go.yaml.in/yaml +=============== + +YAML Support for the Go Language + + +## Introduction + +The `yaml` package enables [Go](https://go.dev/) programs to comfortably encode +and decode [YAML](https://yaml.org/) values. 
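+As a quick illustration of the compatibility notes below, a YAML 1.1 bool such as `yes` only decodes to `true` when the target is a typed bool; a minimal sketch:
+
+```go
+var typed struct {
+	Enabled bool `yaml:"enabled"`
+}
+_ = yaml.Unmarshal([]byte("enabled: yes"), &typed)
+// typed.Enabled == true (YAML 1.1 bool, typed destination)
+
+var untyped map[string]interface{}
+_ = yaml.Unmarshal([]byte("enabled: yes"), &untyped)
+// untyped["enabled"] == "yes" (untyped destination: stays a string)
+```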
+ +It was originally developed within [Canonical](https://www.canonical.com) as +part of the [juju](https://juju.ubuntu.com) project, and is based on a pure Go +port of the well-known [libyaml](http://pyyaml.org/wiki/LibYAML) C library to +parse and generate YAML data quickly and reliably. + + +## Project Status + +This project started as a fork of the extremely popular [go-yaml]( +https://github.com/go-yaml/yaml/) +project, and is being maintained by the official [YAML organization]( +https://github.com/yaml/). + +The YAML team took over ongoing maintenance and development of the project after +discussion with go-yaml's author, @niemeyer, following his decision to +[label the project repository as "unmaintained"]( +https://github.com/go-yaml/yaml/blob/944c86a7d2/README.md) in April 2025. + +We have put together a team of dedicated maintainers including representatives +of go-yaml's most important downstream projects. + +We will strive to earn the trust of the various go-yaml forks to switch back to +this repository as their upstream. + +Please [contact us](https://cloud-native.slack.com/archives/C08PPAT8PS7) if you +would like to contribute or be involved. + + +## Compatibility + +The `yaml` package supports most of YAML 1.2, but preserves some behavior from +1.1 for backwards compatibility. + +Specifically, v3 of the `yaml` package: + +* Supports YAML 1.1 bools (`yes`/`no`, `on`/`off`) as long as they are being + decoded into a typed bool value. + Otherwise they behave as a string. + Booleans in YAML 1.2 are `true`/`false` only. +* Supports octals encoded and decoded as `0777` per YAML 1.1, rather than + `0o777` as specified in YAML 1.2, because most parsers still use the old + format. + Octals in the `0o777` format are supported though, so new files work. +* Does not support base-60 floats. + These are gone from YAML 1.2, and were actually never supported by this + package as it's clearly a poor choice. + + +## Installation and Usage + +The import path for the package is *go.yaml.in/yaml/v3*. + +To install it, run: + +```bash +go get go.yaml.in/yaml/v3 +``` + + +## API Documentation + +See: + + +## API Stability + +The package API for yaml v3 will remain stable as described in [gopkg.in]( +https://gopkg.in). + + +## Example + +```go +package main + +import ( + "fmt" + "log" + + "go.yaml.in/yaml/v3" +) + +var data = ` +a: Easy! +b: + c: 2 + d: [3, 4] +` + +// Note: struct fields must be public in order for unmarshal to +// correctly populate the data. +type T struct { + A string + B struct { + RenamedC int `yaml:"c"` + D []int `yaml:",flow"` + } +} + +func main() { + t := T{} + + err := yaml.Unmarshal([]byte(data), &t) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- t:\n%v\n\n", t) + + d, err := yaml.Marshal(&t) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- t dump:\n%s\n\n", string(d)) + + m := make(map[interface{}]interface{}) + + err = yaml.Unmarshal([]byte(data), &m) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- m:\n%v\n\n", m) + + d, err = yaml.Marshal(&m) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- m dump:\n%s\n\n", string(d)) +} +``` + +This example will generate the following output: + +``` +--- t: +{Easy! {2 [3 4]}} + +--- t dump: +a: Easy! +b: + c: 2 + d: [3, 4] + + +--- m: +map[a:Easy! b:map[c:2 d:[3 4]]] + +--- m dump: +a: Easy! +b: + c: 2 + d: + - 3 + - 4 +``` + + +## License + +The yaml package is licensed under the MIT and Apache License 2.0 licenses. 
+Please see the LICENSE file for details. diff --git a/vendor/go.yaml.in/yaml/v3/apic.go b/vendor/go.yaml.in/yaml/v3/apic.go new file mode 100644 index 0000000000..05fd305da1 --- /dev/null +++ b/vendor/go.yaml.in/yaml/v3/apic.go @@ -0,0 +1,747 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// Copyright (c) 2006-2010 Kirill Simonov +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +// of the Software, and to permit persons to whom the Software is furnished to do +// so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +package yaml + +import ( + "io" +) + +func yaml_insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) { + //fmt.Println("yaml_insert_token", "pos:", pos, "typ:", token.typ, "head:", parser.tokens_head, "len:", len(parser.tokens)) + + // Check if we can move the queue at the beginning of the buffer. + if parser.tokens_head > 0 && len(parser.tokens) == cap(parser.tokens) { + if parser.tokens_head != len(parser.tokens) { + copy(parser.tokens, parser.tokens[parser.tokens_head:]) + } + parser.tokens = parser.tokens[:len(parser.tokens)-parser.tokens_head] + parser.tokens_head = 0 + } + parser.tokens = append(parser.tokens, *token) + if pos < 0 { + return + } + copy(parser.tokens[parser.tokens_head+pos+1:], parser.tokens[parser.tokens_head+pos:]) + parser.tokens[parser.tokens_head+pos] = *token +} + +// Create a new parser object. +func yaml_parser_initialize(parser *yaml_parser_t) bool { + *parser = yaml_parser_t{ + raw_buffer: make([]byte, 0, input_raw_buffer_size), + buffer: make([]byte, 0, input_buffer_size), + } + return true +} + +// Destroy a parser object. +func yaml_parser_delete(parser *yaml_parser_t) { + *parser = yaml_parser_t{} +} + +// String read handler. +func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { + if parser.input_pos == len(parser.input) { + return 0, io.EOF + } + n = copy(buffer, parser.input[parser.input_pos:]) + parser.input_pos += n + return n, nil +} + +// Reader read handler. +func yaml_reader_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { + return parser.input_reader.Read(buffer) +} + +// Set a string input. +func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) { + if parser.read_handler != nil { + panic("must set the input source only once") + } + parser.read_handler = yaml_string_read_handler + parser.input = input + parser.input_pos = 0 +} + +// Set a file input. 
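+// Despite the "file input" wording, any io.Reader is accepted as the
+// input source; reads are funneled through yaml_reader_read_handler.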
+func yaml_parser_set_input_reader(parser *yaml_parser_t, r io.Reader) { + if parser.read_handler != nil { + panic("must set the input source only once") + } + parser.read_handler = yaml_reader_read_handler + parser.input_reader = r +} + +// Set the source encoding. +func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) { + if parser.encoding != yaml_ANY_ENCODING { + panic("must set the encoding only once") + } + parser.encoding = encoding +} + +// Create a new emitter object. +func yaml_emitter_initialize(emitter *yaml_emitter_t) { + *emitter = yaml_emitter_t{ + buffer: make([]byte, output_buffer_size), + raw_buffer: make([]byte, 0, output_raw_buffer_size), + states: make([]yaml_emitter_state_t, 0, initial_stack_size), + events: make([]yaml_event_t, 0, initial_queue_size), + best_width: -1, + } +} + +// Destroy an emitter object. +func yaml_emitter_delete(emitter *yaml_emitter_t) { + *emitter = yaml_emitter_t{} +} + +// String write handler. +func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error { + *emitter.output_buffer = append(*emitter.output_buffer, buffer...) + return nil +} + +// yaml_writer_write_handler uses emitter.output_writer to write the +// emitted text. +func yaml_writer_write_handler(emitter *yaml_emitter_t, buffer []byte) error { + _, err := emitter.output_writer.Write(buffer) + return err +} + +// Set a string output. +func yaml_emitter_set_output_string(emitter *yaml_emitter_t, output_buffer *[]byte) { + if emitter.write_handler != nil { + panic("must set the output target only once") + } + emitter.write_handler = yaml_string_write_handler + emitter.output_buffer = output_buffer +} + +// Set a file output. +func yaml_emitter_set_output_writer(emitter *yaml_emitter_t, w io.Writer) { + if emitter.write_handler != nil { + panic("must set the output target only once") + } + emitter.write_handler = yaml_writer_write_handler + emitter.output_writer = w +} + +// Set the output encoding. +func yaml_emitter_set_encoding(emitter *yaml_emitter_t, encoding yaml_encoding_t) { + if emitter.encoding != yaml_ANY_ENCODING { + panic("must set the output encoding only once") + } + emitter.encoding = encoding +} + +// Set the canonical output style. +func yaml_emitter_set_canonical(emitter *yaml_emitter_t, canonical bool) { + emitter.canonical = canonical +} + +// Set the indentation increment. +func yaml_emitter_set_indent(emitter *yaml_emitter_t, indent int) { + if indent < 2 || indent > 9 { + indent = 2 + } + emitter.best_indent = indent +} + +// Set the preferred line width. +func yaml_emitter_set_width(emitter *yaml_emitter_t, width int) { + if width < 0 { + width = -1 + } + emitter.best_width = width +} + +// Set if unescaped non-ASCII characters are allowed. +func yaml_emitter_set_unicode(emitter *yaml_emitter_t, unicode bool) { + emitter.unicode = unicode +} + +// Set the preferred line break character. +func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) { + emitter.line_break = line_break +} + +///* +// * Destroy a token object. +// */ +// +//YAML_DECLARE(void) +//yaml_token_delete(yaml_token_t *token) +//{ +// assert(token); // Non-NULL token object expected. 
+// +// switch (token.type) +// { +// case YAML_TAG_DIRECTIVE_TOKEN: +// yaml_free(token.data.tag_directive.handle); +// yaml_free(token.data.tag_directive.prefix); +// break; +// +// case YAML_ALIAS_TOKEN: +// yaml_free(token.data.alias.value); +// break; +// +// case YAML_ANCHOR_TOKEN: +// yaml_free(token.data.anchor.value); +// break; +// +// case YAML_TAG_TOKEN: +// yaml_free(token.data.tag.handle); +// yaml_free(token.data.tag.suffix); +// break; +// +// case YAML_SCALAR_TOKEN: +// yaml_free(token.data.scalar.value); +// break; +// +// default: +// break; +// } +// +// memset(token, 0, sizeof(yaml_token_t)); +//} +// +///* +// * Check if a string is a valid UTF-8 sequence. +// * +// * Check 'reader.c' for more details on UTF-8 encoding. +// */ +// +//static int +//yaml_check_utf8(yaml_char_t *start, size_t length) +//{ +// yaml_char_t *end = start+length; +// yaml_char_t *pointer = start; +// +// while (pointer < end) { +// unsigned char octet; +// unsigned int width; +// unsigned int value; +// size_t k; +// +// octet = pointer[0]; +// width = (octet & 0x80) == 0x00 ? 1 : +// (octet & 0xE0) == 0xC0 ? 2 : +// (octet & 0xF0) == 0xE0 ? 3 : +// (octet & 0xF8) == 0xF0 ? 4 : 0; +// value = (octet & 0x80) == 0x00 ? octet & 0x7F : +// (octet & 0xE0) == 0xC0 ? octet & 0x1F : +// (octet & 0xF0) == 0xE0 ? octet & 0x0F : +// (octet & 0xF8) == 0xF0 ? octet & 0x07 : 0; +// if (!width) return 0; +// if (pointer+width > end) return 0; +// for (k = 1; k < width; k ++) { +// octet = pointer[k]; +// if ((octet & 0xC0) != 0x80) return 0; +// value = (value << 6) + (octet & 0x3F); +// } +// if (!((width == 1) || +// (width == 2 && value >= 0x80) || +// (width == 3 && value >= 0x800) || +// (width == 4 && value >= 0x10000))) return 0; +// +// pointer += width; +// } +// +// return 1; +//} +// + +// Create STREAM-START. +func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) { + *event = yaml_event_t{ + typ: yaml_STREAM_START_EVENT, + encoding: encoding, + } +} + +// Create STREAM-END. +func yaml_stream_end_event_initialize(event *yaml_event_t) { + *event = yaml_event_t{ + typ: yaml_STREAM_END_EVENT, + } +} + +// Create DOCUMENT-START. +func yaml_document_start_event_initialize( + event *yaml_event_t, + version_directive *yaml_version_directive_t, + tag_directives []yaml_tag_directive_t, + implicit bool, +) { + *event = yaml_event_t{ + typ: yaml_DOCUMENT_START_EVENT, + version_directive: version_directive, + tag_directives: tag_directives, + implicit: implicit, + } +} + +// Create DOCUMENT-END. +func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) { + *event = yaml_event_t{ + typ: yaml_DOCUMENT_END_EVENT, + implicit: implicit, + } +} + +// Create ALIAS. +func yaml_alias_event_initialize(event *yaml_event_t, anchor []byte) bool { + *event = yaml_event_t{ + typ: yaml_ALIAS_EVENT, + anchor: anchor, + } + return true +} + +// Create SCALAR. +func yaml_scalar_event_initialize(event *yaml_event_t, anchor, tag, value []byte, plain_implicit, quoted_implicit bool, style yaml_scalar_style_t) bool { + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + anchor: anchor, + tag: tag, + value: value, + implicit: plain_implicit, + quoted_implicit: quoted_implicit, + style: yaml_style_t(style), + } + return true +} + +// Create SEQUENCE-START. 
+func yaml_sequence_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_sequence_style_t) bool { + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(style), + } + return true +} + +// Create SEQUENCE-END. +func yaml_sequence_end_event_initialize(event *yaml_event_t) bool { + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + } + return true +} + +// Create MAPPING-START. +func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) { + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(style), + } +} + +// Create MAPPING-END. +func yaml_mapping_end_event_initialize(event *yaml_event_t) { + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + } +} + +// Destroy an event object. +func yaml_event_delete(event *yaml_event_t) { + *event = yaml_event_t{} +} + +///* +// * Create a document object. +// */ +// +//YAML_DECLARE(int) +//yaml_document_initialize(document *yaml_document_t, +// version_directive *yaml_version_directive_t, +// tag_directives_start *yaml_tag_directive_t, +// tag_directives_end *yaml_tag_directive_t, +// start_implicit int, end_implicit int) +//{ +// struct { +// error yaml_error_type_t +// } context +// struct { +// start *yaml_node_t +// end *yaml_node_t +// top *yaml_node_t +// } nodes = { NULL, NULL, NULL } +// version_directive_copy *yaml_version_directive_t = NULL +// struct { +// start *yaml_tag_directive_t +// end *yaml_tag_directive_t +// top *yaml_tag_directive_t +// } tag_directives_copy = { NULL, NULL, NULL } +// value yaml_tag_directive_t = { NULL, NULL } +// mark yaml_mark_t = { 0, 0, 0 } +// +// assert(document) // Non-NULL document object is expected. +// assert((tag_directives_start && tag_directives_end) || +// (tag_directives_start == tag_directives_end)) +// // Valid tag directives are expected. 
+// +// if (!STACK_INIT(&context, nodes, INITIAL_STACK_SIZE)) goto error +// +// if (version_directive) { +// version_directive_copy = yaml_malloc(sizeof(yaml_version_directive_t)) +// if (!version_directive_copy) goto error +// version_directive_copy.major = version_directive.major +// version_directive_copy.minor = version_directive.minor +// } +// +// if (tag_directives_start != tag_directives_end) { +// tag_directive *yaml_tag_directive_t +// if (!STACK_INIT(&context, tag_directives_copy, INITIAL_STACK_SIZE)) +// goto error +// for (tag_directive = tag_directives_start +// tag_directive != tag_directives_end; tag_directive ++) { +// assert(tag_directive.handle) +// assert(tag_directive.prefix) +// if (!yaml_check_utf8(tag_directive.handle, +// strlen((char *)tag_directive.handle))) +// goto error +// if (!yaml_check_utf8(tag_directive.prefix, +// strlen((char *)tag_directive.prefix))) +// goto error +// value.handle = yaml_strdup(tag_directive.handle) +// value.prefix = yaml_strdup(tag_directive.prefix) +// if (!value.handle || !value.prefix) goto error +// if (!PUSH(&context, tag_directives_copy, value)) +// goto error +// value.handle = NULL +// value.prefix = NULL +// } +// } +// +// DOCUMENT_INIT(*document, nodes.start, nodes.end, version_directive_copy, +// tag_directives_copy.start, tag_directives_copy.top, +// start_implicit, end_implicit, mark, mark) +// +// return 1 +// +//error: +// STACK_DEL(&context, nodes) +// yaml_free(version_directive_copy) +// while (!STACK_EMPTY(&context, tag_directives_copy)) { +// value yaml_tag_directive_t = POP(&context, tag_directives_copy) +// yaml_free(value.handle) +// yaml_free(value.prefix) +// } +// STACK_DEL(&context, tag_directives_copy) +// yaml_free(value.handle) +// yaml_free(value.prefix) +// +// return 0 +//} +// +///* +// * Destroy a document object. +// */ +// +//YAML_DECLARE(void) +//yaml_document_delete(document *yaml_document_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// tag_directive *yaml_tag_directive_t +// +// context.error = YAML_NO_ERROR // Eliminate a compiler warning. +// +// assert(document) // Non-NULL document object is expected. +// +// while (!STACK_EMPTY(&context, document.nodes)) { +// node yaml_node_t = POP(&context, document.nodes) +// yaml_free(node.tag) +// switch (node.type) { +// case YAML_SCALAR_NODE: +// yaml_free(node.data.scalar.value) +// break +// case YAML_SEQUENCE_NODE: +// STACK_DEL(&context, node.data.sequence.items) +// break +// case YAML_MAPPING_NODE: +// STACK_DEL(&context, node.data.mapping.pairs) +// break +// default: +// assert(0) // Should not happen. +// } +// } +// STACK_DEL(&context, document.nodes) +// +// yaml_free(document.version_directive) +// for (tag_directive = document.tag_directives.start +// tag_directive != document.tag_directives.end +// tag_directive++) { +// yaml_free(tag_directive.handle) +// yaml_free(tag_directive.prefix) +// } +// yaml_free(document.tag_directives.start) +// +// memset(document, 0, sizeof(yaml_document_t)) +//} +// +///** +// * Get a document node. +// */ +// +//YAML_DECLARE(yaml_node_t *) +//yaml_document_get_node(document *yaml_document_t, index int) +//{ +// assert(document) // Non-NULL document object is expected. +// +// if (index > 0 && document.nodes.start + index <= document.nodes.top) { +// return document.nodes.start + index - 1 +// } +// return NULL +//} +// +///** +// * Get the root object. 
+// */ +// +//YAML_DECLARE(yaml_node_t *) +//yaml_document_get_root_node(document *yaml_document_t) +//{ +// assert(document) // Non-NULL document object is expected. +// +// if (document.nodes.top != document.nodes.start) { +// return document.nodes.start +// } +// return NULL +//} +// +///* +// * Add a scalar node to a document. +// */ +// +//YAML_DECLARE(int) +//yaml_document_add_scalar(document *yaml_document_t, +// tag *yaml_char_t, value *yaml_char_t, length int, +// style yaml_scalar_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// value_copy *yaml_char_t = NULL +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. +// assert(value) // Non-NULL value is expected. +// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_SCALAR_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (length < 0) { +// length = strlen((char *)value) +// } +// +// if (!yaml_check_utf8(value, length)) goto error +// value_copy = yaml_malloc(length+1) +// if (!value_copy) goto error +// memcpy(value_copy, value, length) +// value_copy[length] = '\0' +// +// SCALAR_NODE_INIT(node, tag_copy, value_copy, length, style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// yaml_free(tag_copy) +// yaml_free(value_copy) +// +// return 0 +//} +// +///* +// * Add a sequence node to a document. +// */ +// +//YAML_DECLARE(int) +//yaml_document_add_sequence(document *yaml_document_t, +// tag *yaml_char_t, style yaml_sequence_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// struct { +// start *yaml_node_item_t +// end *yaml_node_item_t +// top *yaml_node_item_t +// } items = { NULL, NULL, NULL } +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. +// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_SEQUENCE_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (!STACK_INIT(&context, items, INITIAL_STACK_SIZE)) goto error +// +// SEQUENCE_NODE_INIT(node, tag_copy, items.start, items.end, +// style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// STACK_DEL(&context, items) +// yaml_free(tag_copy) +// +// return 0 +//} +// +///* +// * Add a mapping node to a document. +// */ +// +//YAML_DECLARE(int) +//yaml_document_add_mapping(document *yaml_document_t, +// tag *yaml_char_t, style yaml_mapping_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// struct { +// start *yaml_node_pair_t +// end *yaml_node_pair_t +// top *yaml_node_pair_t +// } pairs = { NULL, NULL, NULL } +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. 
+// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_MAPPING_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (!STACK_INIT(&context, pairs, INITIAL_STACK_SIZE)) goto error +// +// MAPPING_NODE_INIT(node, tag_copy, pairs.start, pairs.end, +// style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// STACK_DEL(&context, pairs) +// yaml_free(tag_copy) +// +// return 0 +//} +// +///* +// * Append an item to a sequence node. +// */ +// +//YAML_DECLARE(int) +//yaml_document_append_sequence_item(document *yaml_document_t, +// sequence int, item int) +//{ +// struct { +// error yaml_error_type_t +// } context +// +// assert(document) // Non-NULL document is required. +// assert(sequence > 0 +// && document.nodes.start + sequence <= document.nodes.top) +// // Valid sequence id is required. +// assert(document.nodes.start[sequence-1].type == YAML_SEQUENCE_NODE) +// // A sequence node is required. +// assert(item > 0 && document.nodes.start + item <= document.nodes.top) +// // Valid item id is required. +// +// if (!PUSH(&context, +// document.nodes.start[sequence-1].data.sequence.items, item)) +// return 0 +// +// return 1 +//} +// +///* +// * Append a pair of a key and a value to a mapping node. +// */ +// +//YAML_DECLARE(int) +//yaml_document_append_mapping_pair(document *yaml_document_t, +// mapping int, key int, value int) +//{ +// struct { +// error yaml_error_type_t +// } context +// +// pair yaml_node_pair_t +// +// assert(document) // Non-NULL document is required. +// assert(mapping > 0 +// && document.nodes.start + mapping <= document.nodes.top) +// // Valid mapping id is required. +// assert(document.nodes.start[mapping-1].type == YAML_MAPPING_NODE) +// // A mapping node is required. +// assert(key > 0 && document.nodes.start + key <= document.nodes.top) +// // Valid key id is required. +// assert(value > 0 && document.nodes.start + value <= document.nodes.top) +// // Valid value id is required. +// +// pair.key = key +// pair.value = value +// +// if (!PUSH(&context, +// document.nodes.start[mapping-1].data.mapping.pairs, pair)) +// return 0 +// +// return 1 +//} +// +// diff --git a/vendor/go.yaml.in/yaml/v3/decode.go b/vendor/go.yaml.in/yaml/v3/decode.go new file mode 100644 index 0000000000..02e2b17bfe --- /dev/null +++ b/vendor/go.yaml.in/yaml/v3/decode.go @@ -0,0 +1,1018 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package yaml + +import ( + "encoding" + "encoding/base64" + "fmt" + "io" + "math" + "reflect" + "strconv" + "time" +) + +// ---------------------------------------------------------------------------- +// Parser, produces a node tree out of a libyaml event stream. 
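+//
+// For example, the document "a: 1" becomes a DocumentNode whose single child
+// is a MappingNode holding two ScalarNode children: the key "a" and the
+// value "1".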
+
+type parser struct {
+	parser   yaml_parser_t
+	event    yaml_event_t
+	doc      *Node
+	anchors  map[string]*Node
+	doneInit bool
+	textless bool
+}
+
+func newParser(b []byte) *parser {
+	p := parser{}
+	if !yaml_parser_initialize(&p.parser) {
+		panic("failed to initialize YAML parser")
+	}
+	if len(b) == 0 {
+		b = []byte{'\n'}
+	}
+	yaml_parser_set_input_string(&p.parser, b)
+	return &p
+}
+
+func newParserFromReader(r io.Reader) *parser {
+	p := parser{}
+	if !yaml_parser_initialize(&p.parser) {
+		panic("failed to initialize YAML parser")
+	}
+	yaml_parser_set_input_reader(&p.parser, r)
+	return &p
+}
+
+func (p *parser) init() {
+	if p.doneInit {
+		return
+	}
+	p.anchors = make(map[string]*Node)
+	p.expect(yaml_STREAM_START_EVENT)
+	p.doneInit = true
+}
+
+func (p *parser) destroy() {
+	if p.event.typ != yaml_NO_EVENT {
+		yaml_event_delete(&p.event)
+	}
+	yaml_parser_delete(&p.parser)
+}
+
+// expect consumes an event from the event stream and
+// checks that it's of the expected type.
+func (p *parser) expect(e yaml_event_type_t) {
+	if p.event.typ == yaml_NO_EVENT {
+		if !yaml_parser_parse(&p.parser, &p.event) {
+			p.fail()
+		}
+	}
+	if p.event.typ == yaml_STREAM_END_EVENT {
+		failf("attempted to go past the end of stream; corrupted value?")
+	}
+	if p.event.typ != e {
+		p.parser.problem = fmt.Sprintf("expected %s event but got %s", e, p.event.typ)
+		p.fail()
+	}
+	yaml_event_delete(&p.event)
+	p.event.typ = yaml_NO_EVENT
+}
+
+// peek peeks at the next event in the event stream,
+// puts the results into p.event and returns the event type.
+func (p *parser) peek() yaml_event_type_t {
+	if p.event.typ != yaml_NO_EVENT {
+		return p.event.typ
+	}
+	// It's a curious choice for the underlying API to generally return a
+	// positive result on success, but in this case to return true in an
+	// error scenario. This was the source of bugs in the past (issue #666).
+	if !yaml_parser_parse(&p.parser, &p.event) || p.parser.error != yaml_NO_ERROR {
+		p.fail()
+	}
+	return p.event.typ
+}
+
+func (p *parser) fail() {
+	var where string
+	var line int
+	if p.parser.context_mark.line != 0 {
+		line = p.parser.context_mark.line
+		// Scanner errors don't iterate line before returning error
+		if p.parser.error == yaml_SCANNER_ERROR {
+			line++
+		}
+	} else if p.parser.problem_mark.line != 0 {
+		line = p.parser.problem_mark.line
+		// Scanner errors don't iterate line before returning error
+		if p.parser.error == yaml_SCANNER_ERROR {
+			line++
+		}
+	}
+	if line != 0 {
+		where = "line " + strconv.Itoa(line) + ": "
+	}
+	var msg string
+	if len(p.parser.problem) > 0 {
+		msg = p.parser.problem
+	} else {
+		msg = "unknown problem parsing YAML content"
+	}
+	failf("%s%s", where, msg)
+}
+
+func (p *parser) anchor(n *Node, anchor []byte) {
+	if anchor != nil {
+		n.Anchor = string(anchor)
+		p.anchors[n.Anchor] = n
+	}
+}
+
+func (p *parser) parse() *Node {
+	p.init()
+	switch p.peek() {
+	case yaml_SCALAR_EVENT:
+		return p.scalar()
+	case yaml_ALIAS_EVENT:
+		return p.alias()
+	case yaml_MAPPING_START_EVENT:
+		return p.mapping()
+	case yaml_SEQUENCE_START_EVENT:
+		return p.sequence()
+	case yaml_DOCUMENT_START_EVENT:
+		return p.document()
+	case yaml_STREAM_END_EVENT:
+		// Happens when attempting to decode an empty buffer.
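+		// Unmarshal then leaves the target untouched, while Decoder.Decode
+		// reports io.EOF.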
+ return nil + case yaml_TAIL_COMMENT_EVENT: + panic("internal error: unexpected tail comment event (please report)") + default: + panic("internal error: attempted to parse unknown event (please report): " + p.event.typ.String()) + } +} + +func (p *parser) node(kind Kind, defaultTag, tag, value string) *Node { + var style Style + if tag != "" && tag != "!" { + tag = shortTag(tag) + style = TaggedStyle + } else if defaultTag != "" { + tag = defaultTag + } else if kind == ScalarNode { + tag, _ = resolve("", value) + } + n := &Node{ + Kind: kind, + Tag: tag, + Value: value, + Style: style, + } + if !p.textless { + n.Line = p.event.start_mark.line + 1 + n.Column = p.event.start_mark.column + 1 + n.HeadComment = string(p.event.head_comment) + n.LineComment = string(p.event.line_comment) + n.FootComment = string(p.event.foot_comment) + } + return n +} + +func (p *parser) parseChild(parent *Node) *Node { + child := p.parse() + parent.Content = append(parent.Content, child) + return child +} + +func (p *parser) document() *Node { + n := p.node(DocumentNode, "", "", "") + p.doc = n + p.expect(yaml_DOCUMENT_START_EVENT) + p.parseChild(n) + if p.peek() == yaml_DOCUMENT_END_EVENT { + n.FootComment = string(p.event.foot_comment) + } + p.expect(yaml_DOCUMENT_END_EVENT) + return n +} + +func (p *parser) alias() *Node { + n := p.node(AliasNode, "", "", string(p.event.anchor)) + n.Alias = p.anchors[n.Value] + if n.Alias == nil { + failf("unknown anchor '%s' referenced", n.Value) + } + p.expect(yaml_ALIAS_EVENT) + return n +} + +func (p *parser) scalar() *Node { + var parsedStyle = p.event.scalar_style() + var nodeStyle Style + switch { + case parsedStyle&yaml_DOUBLE_QUOTED_SCALAR_STYLE != 0: + nodeStyle = DoubleQuotedStyle + case parsedStyle&yaml_SINGLE_QUOTED_SCALAR_STYLE != 0: + nodeStyle = SingleQuotedStyle + case parsedStyle&yaml_LITERAL_SCALAR_STYLE != 0: + nodeStyle = LiteralStyle + case parsedStyle&yaml_FOLDED_SCALAR_STYLE != 0: + nodeStyle = FoldedStyle + } + var nodeValue = string(p.event.value) + var nodeTag = string(p.event.tag) + var defaultTag string + if nodeStyle == 0 { + if nodeValue == "<<" { + defaultTag = mergeTag + } + } else { + defaultTag = strTag + } + n := p.node(ScalarNode, defaultTag, nodeTag, nodeValue) + n.Style |= nodeStyle + p.anchor(n, p.event.anchor) + p.expect(yaml_SCALAR_EVENT) + return n +} + +func (p *parser) sequence() *Node { + n := p.node(SequenceNode, seqTag, string(p.event.tag), "") + if p.event.sequence_style()&yaml_FLOW_SEQUENCE_STYLE != 0 { + n.Style |= FlowStyle + } + p.anchor(n, p.event.anchor) + p.expect(yaml_SEQUENCE_START_EVENT) + for p.peek() != yaml_SEQUENCE_END_EVENT { + p.parseChild(n) + } + n.LineComment = string(p.event.line_comment) + n.FootComment = string(p.event.foot_comment) + p.expect(yaml_SEQUENCE_END_EVENT) + return n +} + +func (p *parser) mapping() *Node { + n := p.node(MappingNode, mapTag, string(p.event.tag), "") + block := true + if p.event.mapping_style()&yaml_FLOW_MAPPING_STYLE != 0 { + block = false + n.Style |= FlowStyle + } + p.anchor(n, p.event.anchor) + p.expect(yaml_MAPPING_START_EVENT) + for p.peek() != yaml_MAPPING_END_EVENT { + k := p.parseChild(n) + if block && k.FootComment != "" { + // Must be a foot comment for the prior value when being dedented. 
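+			// n.Content alternates keys and values, so the prior value
+			// sits third from the end.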
+ if len(n.Content) > 2 { + n.Content[len(n.Content)-3].FootComment = k.FootComment + k.FootComment = "" + } + } + v := p.parseChild(n) + if k.FootComment == "" && v.FootComment != "" { + k.FootComment = v.FootComment + v.FootComment = "" + } + if p.peek() == yaml_TAIL_COMMENT_EVENT { + if k.FootComment == "" { + k.FootComment = string(p.event.foot_comment) + } + p.expect(yaml_TAIL_COMMENT_EVENT) + } + } + n.LineComment = string(p.event.line_comment) + n.FootComment = string(p.event.foot_comment) + if n.Style&FlowStyle == 0 && n.FootComment != "" && len(n.Content) > 1 { + n.Content[len(n.Content)-2].FootComment = n.FootComment + n.FootComment = "" + } + p.expect(yaml_MAPPING_END_EVENT) + return n +} + +// ---------------------------------------------------------------------------- +// Decoder, unmarshals a node into a provided value. + +type decoder struct { + doc *Node + aliases map[*Node]bool + terrors []string + + stringMapType reflect.Type + generalMapType reflect.Type + + knownFields bool + uniqueKeys bool + decodeCount int + aliasCount int + aliasDepth int + + mergedFields map[interface{}]bool +} + +var ( + nodeType = reflect.TypeOf(Node{}) + durationType = reflect.TypeOf(time.Duration(0)) + stringMapType = reflect.TypeOf(map[string]interface{}{}) + generalMapType = reflect.TypeOf(map[interface{}]interface{}{}) + ifaceType = generalMapType.Elem() + timeType = reflect.TypeOf(time.Time{}) + ptrTimeType = reflect.TypeOf(&time.Time{}) +) + +func newDecoder() *decoder { + d := &decoder{ + stringMapType: stringMapType, + generalMapType: generalMapType, + uniqueKeys: true, + } + d.aliases = make(map[*Node]bool) + return d +} + +func (d *decoder) terror(n *Node, tag string, out reflect.Value) { + if n.Tag != "" { + tag = n.Tag + } + value := n.Value + if tag != seqTag && tag != mapTag { + if len(value) > 10 { + value = " `" + value[:7] + "...`" + } else { + value = " `" + value + "`" + } + } + d.terrors = append(d.terrors, fmt.Sprintf("line %d: cannot unmarshal %s%s into %s", n.Line, shortTag(tag), value, out.Type())) +} + +func (d *decoder) callUnmarshaler(n *Node, u Unmarshaler) (good bool) { + err := u.UnmarshalYAML(n) + if e, ok := err.(*TypeError); ok { + d.terrors = append(d.terrors, e.Errors...) + return false + } + if err != nil { + fail(err) + } + return true +} + +func (d *decoder) callObsoleteUnmarshaler(n *Node, u obsoleteUnmarshaler) (good bool) { + terrlen := len(d.terrors) + err := u.UnmarshalYAML(func(v interface{}) (err error) { + defer handleErr(&err) + d.unmarshal(n, reflect.ValueOf(v)) + if len(d.terrors) > terrlen { + issues := d.terrors[terrlen:] + d.terrors = d.terrors[:terrlen] + return &TypeError{issues} + } + return nil + }) + if e, ok := err.(*TypeError); ok { + d.terrors = append(d.terrors, e.Errors...) + return false + } + if err != nil { + fail(err) + } + return true +} + +// d.prepare initializes and dereferences pointers and calls UnmarshalYAML +// if a value is found to implement it. +// It returns the initialized and dereferenced out value, whether +// unmarshalling was already done by UnmarshalYAML, and if so whether +// its types unmarshalled appropriately. +// +// If n holds a null value, prepare returns before doing anything. 
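+//
+// As a sketch, a type implementing the Unmarshaler interface, e.g.
+//
+//	type Port int
+//
+//	func (p *Port) UnmarshalYAML(value *Node) error { ... }
+//
+// is dispatched here rather than through the generic decoding paths below.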
+func (d *decoder) prepare(n *Node, out reflect.Value) (newout reflect.Value, unmarshaled, good bool) { + if n.ShortTag() == nullTag { + return out, false, false + } + again := true + for again { + again = false + if out.Kind() == reflect.Ptr { + if out.IsNil() { + out.Set(reflect.New(out.Type().Elem())) + } + out = out.Elem() + again = true + } + if out.CanAddr() { + outi := out.Addr().Interface() + if u, ok := outi.(Unmarshaler); ok { + good = d.callUnmarshaler(n, u) + return out, true, good + } + if u, ok := outi.(obsoleteUnmarshaler); ok { + good = d.callObsoleteUnmarshaler(n, u) + return out, true, good + } + } + } + return out, false, false +} + +func (d *decoder) fieldByIndex(n *Node, v reflect.Value, index []int) (field reflect.Value) { + if n.ShortTag() == nullTag { + return reflect.Value{} + } + for _, num := range index { + for { + if v.Kind() == reflect.Ptr { + if v.IsNil() { + v.Set(reflect.New(v.Type().Elem())) + } + v = v.Elem() + continue + } + break + } + v = v.Field(num) + } + return v +} + +const ( + // 400,000 decode operations is ~500kb of dense object declarations, or + // ~5kb of dense object declarations with 10000% alias expansion + alias_ratio_range_low = 400000 + + // 4,000,000 decode operations is ~5MB of dense object declarations, or + // ~4.5MB of dense object declarations with 10% alias expansion + alias_ratio_range_high = 4000000 + + // alias_ratio_range is the range over which we scale allowed alias ratios + alias_ratio_range = float64(alias_ratio_range_high - alias_ratio_range_low) +) + +func allowedAliasRatio(decodeCount int) float64 { + switch { + case decodeCount <= alias_ratio_range_low: + // allow 99% to come from alias expansion for small-to-medium documents + return 0.99 + case decodeCount >= alias_ratio_range_high: + // allow 10% to come from alias expansion for very large documents + return 0.10 + default: + // scale smoothly from 99% down to 10% over the range. + // this maps to 396,000 - 400,000 allowed alias-driven decodes over the range. + // 400,000 decode operations is ~100MB of allocations in worst-case scenarios (single-item maps). + return 0.99 - 0.89*(float64(decodeCount-alias_ratio_range_low)/alias_ratio_range) + } +} + +func (d *decoder) unmarshal(n *Node, out reflect.Value) (good bool) { + d.decodeCount++ + if d.aliasDepth > 0 { + d.aliasCount++ + } + if d.aliasCount > 100 && d.decodeCount > 1000 && float64(d.aliasCount)/float64(d.decodeCount) > allowedAliasRatio(d.decodeCount) { + failf("document contains excessive aliasing") + } + if out.Type() == nodeType { + out.Set(reflect.ValueOf(n).Elem()) + return true + } + switch n.Kind { + case DocumentNode: + return d.document(n, out) + case AliasNode: + return d.alias(n, out) + } + out, unmarshaled, good := d.prepare(n, out) + if unmarshaled { + return good + } + switch n.Kind { + case ScalarNode: + good = d.scalar(n, out) + case MappingNode: + good = d.mapping(n, out) + case SequenceNode: + good = d.sequence(n, out) + case 0: + if n.IsZero() { + return d.null(out) + } + fallthrough + default: + failf("cannot decode node with unknown kind %d", n.Kind) + } + return good +} + +func (d *decoder) document(n *Node, out reflect.Value) (good bool) { + if len(n.Content) == 1 { + d.doc = n + d.unmarshal(n.Content[0], out) + return true + } + return false +} + +func (d *decoder) alias(n *Node, out reflect.Value) (good bool) { + if d.aliases[n] { + // TODO this could actually be allowed in some circumstances. 
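+		// A self-referential document such as "a: &a [*a]" would otherwise
+		// recurse forever while expanding the alias.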
+ failf("anchor '%s' value contains itself", n.Value) + } + d.aliases[n] = true + d.aliasDepth++ + good = d.unmarshal(n.Alias, out) + d.aliasDepth-- + delete(d.aliases, n) + return good +} + +var zeroValue reflect.Value + +func resetMap(out reflect.Value) { + for _, k := range out.MapKeys() { + out.SetMapIndex(k, zeroValue) + } +} + +func (d *decoder) null(out reflect.Value) bool { + if out.CanAddr() { + switch out.Kind() { + case reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice: + out.Set(reflect.Zero(out.Type())) + return true + } + } + return false +} + +func (d *decoder) scalar(n *Node, out reflect.Value) bool { + var tag string + var resolved interface{} + if n.indicatedString() { + tag = strTag + resolved = n.Value + } else { + tag, resolved = resolve(n.Tag, n.Value) + if tag == binaryTag { + data, err := base64.StdEncoding.DecodeString(resolved.(string)) + if err != nil { + failf("!!binary value contains invalid base64 data") + } + resolved = string(data) + } + } + if resolved == nil { + return d.null(out) + } + if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() { + // We've resolved to exactly the type we want, so use that. + out.Set(resolvedv) + return true + } + // Perhaps we can use the value as a TextUnmarshaler to + // set its value. + if out.CanAddr() { + u, ok := out.Addr().Interface().(encoding.TextUnmarshaler) + if ok { + var text []byte + if tag == binaryTag { + text = []byte(resolved.(string)) + } else { + // We let any value be unmarshaled into TextUnmarshaler. + // That might be more lax than we'd like, but the + // TextUnmarshaler itself should bowl out any dubious values. + text = []byte(n.Value) + } + err := u.UnmarshalText(text) + if err != nil { + fail(err) + } + return true + } + } + switch out.Kind() { + case reflect.String: + if tag == binaryTag { + out.SetString(resolved.(string)) + return true + } + out.SetString(n.Value) + return true + case reflect.Interface: + out.Set(reflect.ValueOf(resolved)) + return true + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + // This used to work in v2, but it's very unfriendly. 
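+		// Durations must be spelled as strings such as "3s" or "150ms";
+		// a bare integer is rejected instead of being read as nanoseconds.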
+ isDuration := out.Type() == durationType + + switch resolved := resolved.(type) { + case int: + if !isDuration && !out.OverflowInt(int64(resolved)) { + out.SetInt(int64(resolved)) + return true + } + case int64: + if !isDuration && !out.OverflowInt(resolved) { + out.SetInt(resolved) + return true + } + case uint64: + if !isDuration && resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) { + out.SetInt(int64(resolved)) + return true + } + case float64: + if !isDuration && resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) { + out.SetInt(int64(resolved)) + return true + } + case string: + if out.Type() == durationType { + d, err := time.ParseDuration(resolved) + if err == nil { + out.SetInt(int64(d)) + return true + } + } + } + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + switch resolved := resolved.(type) { + case int: + if resolved >= 0 && !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + return true + } + case int64: + if resolved >= 0 && !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + return true + } + case uint64: + if !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + return true + } + case float64: + if resolved <= math.MaxUint64 && !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + return true + } + } + case reflect.Bool: + switch resolved := resolved.(type) { + case bool: + out.SetBool(resolved) + return true + case string: + // This offers some compatibility with the 1.1 spec (https://yaml.org/type/bool.html). + // It only works if explicitly attempting to unmarshal into a typed bool value. + switch resolved { + case "y", "Y", "yes", "Yes", "YES", "on", "On", "ON": + out.SetBool(true) + return true + case "n", "N", "no", "No", "NO", "off", "Off", "OFF": + out.SetBool(false) + return true + } + } + case reflect.Float32, reflect.Float64: + switch resolved := resolved.(type) { + case int: + out.SetFloat(float64(resolved)) + return true + case int64: + out.SetFloat(float64(resolved)) + return true + case uint64: + out.SetFloat(float64(resolved)) + return true + case float64: + out.SetFloat(resolved) + return true + } + case reflect.Struct: + if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() { + out.Set(resolvedv) + return true + } + case reflect.Ptr: + panic("yaml internal error: please report the issue") + } + d.terror(n, tag, out) + return false +} + +func settableValueOf(i interface{}) reflect.Value { + v := reflect.ValueOf(i) + sv := reflect.New(v.Type()).Elem() + sv.Set(v) + return sv +} + +func (d *decoder) sequence(n *Node, out reflect.Value) (good bool) { + l := len(n.Content) + + var iface reflect.Value + switch out.Kind() { + case reflect.Slice: + out.Set(reflect.MakeSlice(out.Type(), l, l)) + case reflect.Array: + if l != out.Len() { + failf("invalid array: want %d elements but got %d", out.Len(), l) + } + case reflect.Interface: + // No type hints. Will have to use a generic sequence. 
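+		// Decoding into a plain interface{} therefore yields a []interface{}.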
+ iface = out + out = settableValueOf(make([]interface{}, l)) + default: + d.terror(n, seqTag, out) + return false + } + et := out.Type().Elem() + + j := 0 + for i := 0; i < l; i++ { + e := reflect.New(et).Elem() + if ok := d.unmarshal(n.Content[i], e); ok { + out.Index(j).Set(e) + j++ + } + } + if out.Kind() != reflect.Array { + out.Set(out.Slice(0, j)) + } + if iface.IsValid() { + iface.Set(out) + } + return true +} + +func (d *decoder) mapping(n *Node, out reflect.Value) (good bool) { + l := len(n.Content) + if d.uniqueKeys { + nerrs := len(d.terrors) + for i := 0; i < l; i += 2 { + ni := n.Content[i] + for j := i + 2; j < l; j += 2 { + nj := n.Content[j] + if ni.Kind == nj.Kind && ni.Value == nj.Value { + d.terrors = append(d.terrors, fmt.Sprintf("line %d: mapping key %#v already defined at line %d", nj.Line, nj.Value, ni.Line)) + } + } + } + if len(d.terrors) > nerrs { + return false + } + } + switch out.Kind() { + case reflect.Struct: + return d.mappingStruct(n, out) + case reflect.Map: + // okay + case reflect.Interface: + iface := out + if isStringMap(n) { + out = reflect.MakeMap(d.stringMapType) + } else { + out = reflect.MakeMap(d.generalMapType) + } + iface.Set(out) + default: + d.terror(n, mapTag, out) + return false + } + + outt := out.Type() + kt := outt.Key() + et := outt.Elem() + + stringMapType := d.stringMapType + generalMapType := d.generalMapType + if outt.Elem() == ifaceType { + if outt.Key().Kind() == reflect.String { + d.stringMapType = outt + } else if outt.Key() == ifaceType { + d.generalMapType = outt + } + } + + mergedFields := d.mergedFields + d.mergedFields = nil + + var mergeNode *Node + + mapIsNew := false + if out.IsNil() { + out.Set(reflect.MakeMap(outt)) + mapIsNew = true + } + for i := 0; i < l; i += 2 { + if isMerge(n.Content[i]) { + mergeNode = n.Content[i+1] + continue + } + k := reflect.New(kt).Elem() + if d.unmarshal(n.Content[i], k) { + if mergedFields != nil { + ki := k.Interface() + if d.getPossiblyUnhashableKey(mergedFields, ki) { + continue + } + d.setPossiblyUnhashableKey(mergedFields, ki, true) + } + kkind := k.Kind() + if kkind == reflect.Interface { + kkind = k.Elem().Kind() + } + if kkind == reflect.Map || kkind == reflect.Slice { + failf("invalid map key: %#v", k.Interface()) + } + e := reflect.New(et).Elem() + if d.unmarshal(n.Content[i+1], e) || n.Content[i+1].ShortTag() == nullTag && (mapIsNew || !out.MapIndex(k).IsValid()) { + out.SetMapIndex(k, e) + } + } + } + + d.mergedFields = mergedFields + if mergeNode != nil { + d.merge(n, mergeNode, out) + } + + d.stringMapType = stringMapType + d.generalMapType = generalMapType + return true +} + +func isStringMap(n *Node) bool { + if n.Kind != MappingNode { + return false + } + l := len(n.Content) + for i := 0; i < l; i += 2 { + shortTag := n.Content[i].ShortTag() + if shortTag != strTag && shortTag != mergeTag { + return false + } + } + return true +} + +func (d *decoder) mappingStruct(n *Node, out reflect.Value) (good bool) { + sinfo, err := getStructInfo(out.Type()) + if err != nil { + panic(err) + } + + var inlineMap reflect.Value + var elemType reflect.Type + if sinfo.InlineMap != -1 { + inlineMap = out.Field(sinfo.InlineMap) + elemType = inlineMap.Type().Elem() + } + + for _, index := range sinfo.InlineUnmarshalers { + field := d.fieldByIndex(n, out, index) + d.prepare(n, field) + } + + mergedFields := d.mergedFields + d.mergedFields = nil + var mergeNode *Node + var doneFields []bool + if d.uniqueKeys { + doneFields = make([]bool, len(sinfo.FieldsList)) + } + name := 
settableValueOf("") + l := len(n.Content) + for i := 0; i < l; i += 2 { + ni := n.Content[i] + if isMerge(ni) { + mergeNode = n.Content[i+1] + continue + } + if !d.unmarshal(ni, name) { + continue + } + sname := name.String() + if mergedFields != nil { + if mergedFields[sname] { + continue + } + mergedFields[sname] = true + } + if info, ok := sinfo.FieldsMap[sname]; ok { + if d.uniqueKeys { + if doneFields[info.Id] { + d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s already set in type %s", ni.Line, name.String(), out.Type())) + continue + } + doneFields[info.Id] = true + } + var field reflect.Value + if info.Inline == nil { + field = out.Field(info.Num) + } else { + field = d.fieldByIndex(n, out, info.Inline) + } + d.unmarshal(n.Content[i+1], field) + } else if sinfo.InlineMap != -1 { + if inlineMap.IsNil() { + inlineMap.Set(reflect.MakeMap(inlineMap.Type())) + } + value := reflect.New(elemType).Elem() + d.unmarshal(n.Content[i+1], value) + inlineMap.SetMapIndex(name, value) + } else if d.knownFields { + d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s not found in type %s", ni.Line, name.String(), out.Type())) + } + } + + d.mergedFields = mergedFields + if mergeNode != nil { + d.merge(n, mergeNode, out) + } + return true +} + +func failWantMap() { + failf("map merge requires map or sequence of maps as the value") +} + +func (d *decoder) setPossiblyUnhashableKey(m map[interface{}]bool, key interface{}, value bool) { + defer func() { + if err := recover(); err != nil { + failf("%v", err) + } + }() + m[key] = value +} + +func (d *decoder) getPossiblyUnhashableKey(m map[interface{}]bool, key interface{}) bool { + defer func() { + if err := recover(); err != nil { + failf("%v", err) + } + }() + return m[key] +} + +func (d *decoder) merge(parent *Node, merge *Node, out reflect.Value) { + mergedFields := d.mergedFields + if mergedFields == nil { + d.mergedFields = make(map[interface{}]bool) + for i := 0; i < len(parent.Content); i += 2 { + k := reflect.New(ifaceType).Elem() + if d.unmarshal(parent.Content[i], k) { + d.setPossiblyUnhashableKey(d.mergedFields, k.Interface(), true) + } + } + } + + switch merge.Kind { + case MappingNode: + d.unmarshal(merge, out) + case AliasNode: + if merge.Alias != nil && merge.Alias.Kind != MappingNode { + failWantMap() + } + d.unmarshal(merge, out) + case SequenceNode: + for i := 0; i < len(merge.Content); i++ { + ni := merge.Content[i] + if ni.Kind == AliasNode { + if ni.Alias != nil && ni.Alias.Kind != MappingNode { + failWantMap() + } + } else if ni.Kind != MappingNode { + failWantMap() + } + d.unmarshal(ni, out) + } + default: + failWantMap() + } + + d.mergedFields = mergedFields +} + +func isMerge(n *Node) bool { + return n.Kind == ScalarNode && n.Value == "<<" && (n.Tag == "" || n.Tag == "!" 
|| shortTag(n.Tag) == mergeTag) +} diff --git a/vendor/go.yaml.in/yaml/v3/emitterc.go b/vendor/go.yaml.in/yaml/v3/emitterc.go new file mode 100644 index 0000000000..ab4e03ba72 --- /dev/null +++ b/vendor/go.yaml.in/yaml/v3/emitterc.go @@ -0,0 +1,2054 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// Copyright (c) 2006-2010 Kirill Simonov +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +// of the Software, and to permit persons to whom the Software is furnished to do +// so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +package yaml + +import ( + "bytes" + "fmt" +) + +// Flush the buffer if needed. +func flush(emitter *yaml_emitter_t) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) { + return yaml_emitter_flush(emitter) + } + return true +} + +// Put a character to the output buffer. +func put(emitter *yaml_emitter_t, value byte) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + emitter.buffer[emitter.buffer_pos] = value + emitter.buffer_pos++ + emitter.column++ + return true +} + +// Put a line break to the output buffer. +func put_break(emitter *yaml_emitter_t) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + switch emitter.line_break { + case yaml_CR_BREAK: + emitter.buffer[emitter.buffer_pos] = '\r' + emitter.buffer_pos += 1 + case yaml_LN_BREAK: + emitter.buffer[emitter.buffer_pos] = '\n' + emitter.buffer_pos += 1 + case yaml_CRLN_BREAK: + emitter.buffer[emitter.buffer_pos+0] = '\r' + emitter.buffer[emitter.buffer_pos+1] = '\n' + emitter.buffer_pos += 2 + default: + panic("unknown line break setting") + } + if emitter.column == 0 { + emitter.space_above = true + } + emitter.column = 0 + emitter.line++ + // [Go] Do this here and below and drop from everywhere else (see commented lines). + emitter.indention = true + return true +} + +// Copy a character from a string into buffer. +func write(emitter *yaml_emitter_t, s []byte, i *int) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + p := emitter.buffer_pos + w := width(s[*i]) + switch w { + case 4: + emitter.buffer[p+3] = s[*i+3] + fallthrough + case 3: + emitter.buffer[p+2] = s[*i+2] + fallthrough + case 2: + emitter.buffer[p+1] = s[*i+1] + fallthrough + case 1: + emitter.buffer[p+0] = s[*i+0] + default: + panic("unknown character width") + } + emitter.column++ + emitter.buffer_pos += w + *i += w + return true +} + +// Write a whole string into buffer. 
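+// Runes are copied whole via write, so multi-byte UTF-8 sequences are never
+// split across a buffer flush.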
+func write_all(emitter *yaml_emitter_t, s []byte) bool { + for i := 0; i < len(s); { + if !write(emitter, s, &i) { + return false + } + } + return true +} + +// Copy a line break character from a string into buffer. +func write_break(emitter *yaml_emitter_t, s []byte, i *int) bool { + if s[*i] == '\n' { + if !put_break(emitter) { + return false + } + *i++ + } else { + if !write(emitter, s, i) { + return false + } + if emitter.column == 0 { + emitter.space_above = true + } + emitter.column = 0 + emitter.line++ + // [Go] Do this here and above and drop from everywhere else (see commented lines). + emitter.indention = true + } + return true +} + +// Set an emitter error and return false. +func yaml_emitter_set_emitter_error(emitter *yaml_emitter_t, problem string) bool { + emitter.error = yaml_EMITTER_ERROR + emitter.problem = problem + return false +} + +// Emit an event. +func yaml_emitter_emit(emitter *yaml_emitter_t, event *yaml_event_t) bool { + emitter.events = append(emitter.events, *event) + for !yaml_emitter_need_more_events(emitter) { + event := &emitter.events[emitter.events_head] + if !yaml_emitter_analyze_event(emitter, event) { + return false + } + if !yaml_emitter_state_machine(emitter, event) { + return false + } + yaml_event_delete(event) + emitter.events_head++ + } + return true +} + +// Check if we need to accumulate more events before emitting. +// +// We accumulate extra +// - 1 event for DOCUMENT-START +// - 2 events for SEQUENCE-START +// - 3 events for MAPPING-START +func yaml_emitter_need_more_events(emitter *yaml_emitter_t) bool { + if emitter.events_head == len(emitter.events) { + return true + } + var accumulate int + switch emitter.events[emitter.events_head].typ { + case yaml_DOCUMENT_START_EVENT: + accumulate = 1 + break + case yaml_SEQUENCE_START_EVENT: + accumulate = 2 + break + case yaml_MAPPING_START_EVENT: + accumulate = 3 + break + default: + return false + } + if len(emitter.events)-emitter.events_head > accumulate { + return false + } + var level int + for i := emitter.events_head; i < len(emitter.events); i++ { + switch emitter.events[i].typ { + case yaml_STREAM_START_EVENT, yaml_DOCUMENT_START_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT: + level++ + case yaml_STREAM_END_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_END_EVENT, yaml_MAPPING_END_EVENT: + level-- + } + if level == 0 { + return false + } + } + return true +} + +// Append a directive to the directives stack. +func yaml_emitter_append_tag_directive(emitter *yaml_emitter_t, value *yaml_tag_directive_t, allow_duplicates bool) bool { + for i := 0; i < len(emitter.tag_directives); i++ { + if bytes.Equal(value.handle, emitter.tag_directives[i].handle) { + if allow_duplicates { + return true + } + return yaml_emitter_set_emitter_error(emitter, "duplicate %TAG directive") + } + } + + // [Go] Do we actually need to copy this given garbage collection + // and the lack of deallocating destructors? + tag_copy := yaml_tag_directive_t{ + handle: make([]byte, len(value.handle)), + prefix: make([]byte, len(value.prefix)), + } + copy(tag_copy.handle, value.handle) + copy(tag_copy.prefix, value.prefix) + emitter.tag_directives = append(emitter.tag_directives, tag_copy) + return true +} + +// Increase the indentation level. 
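+// The previous indent is pushed on emitter.indents and restored when the
+// enclosing collection ends.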
+func yaml_emitter_increase_indent_compact(emitter *yaml_emitter_t, flow, indentless bool, compact_seq bool) bool { + emitter.indents = append(emitter.indents, emitter.indent) + if emitter.indent < 0 { + if flow { + emitter.indent = emitter.best_indent + } else { + emitter.indent = 0 + } + } else if !indentless { + // [Go] This was changed so that indentations are more regular. + if emitter.states[len(emitter.states)-1] == yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE { + // The first indent inside a sequence will just skip the "- " indicator. + emitter.indent += 2 + } else { + // Everything else aligns to the chosen indentation. + emitter.indent = emitter.best_indent * ((emitter.indent + emitter.best_indent) / emitter.best_indent) + if compact_seq { + // The value compact_seq passed in is almost always set to `false` when this function is called, + // except when we are dealing with sequence nodes. So this gets triggered to subtract 2 only when we + // are increasing the indent to account for sequence nodes, which will be correct because we need to + // subtract 2 to account for the - at the beginning of the sequence node. + emitter.indent = emitter.indent - 2 + } + } + } + return true +} + +// State dispatcher. +func yaml_emitter_state_machine(emitter *yaml_emitter_t, event *yaml_event_t) bool { + switch emitter.state { + default: + case yaml_EMIT_STREAM_START_STATE: + return yaml_emitter_emit_stream_start(emitter, event) + + case yaml_EMIT_FIRST_DOCUMENT_START_STATE: + return yaml_emitter_emit_document_start(emitter, event, true) + + case yaml_EMIT_DOCUMENT_START_STATE: + return yaml_emitter_emit_document_start(emitter, event, false) + + case yaml_EMIT_DOCUMENT_CONTENT_STATE: + return yaml_emitter_emit_document_content(emitter, event) + + case yaml_EMIT_DOCUMENT_END_STATE: + return yaml_emitter_emit_document_end(emitter, event) + + case yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE: + return yaml_emitter_emit_flow_sequence_item(emitter, event, true, false) + + case yaml_EMIT_FLOW_SEQUENCE_TRAIL_ITEM_STATE: + return yaml_emitter_emit_flow_sequence_item(emitter, event, false, true) + + case yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE: + return yaml_emitter_emit_flow_sequence_item(emitter, event, false, false) + + case yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE: + return yaml_emitter_emit_flow_mapping_key(emitter, event, true, false) + + case yaml_EMIT_FLOW_MAPPING_TRAIL_KEY_STATE: + return yaml_emitter_emit_flow_mapping_key(emitter, event, false, true) + + case yaml_EMIT_FLOW_MAPPING_KEY_STATE: + return yaml_emitter_emit_flow_mapping_key(emitter, event, false, false) + + case yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE: + return yaml_emitter_emit_flow_mapping_value(emitter, event, true) + + case yaml_EMIT_FLOW_MAPPING_VALUE_STATE: + return yaml_emitter_emit_flow_mapping_value(emitter, event, false) + + case yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE: + return yaml_emitter_emit_block_sequence_item(emitter, event, true) + + case yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE: + return yaml_emitter_emit_block_sequence_item(emitter, event, false) + + case yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE: + return yaml_emitter_emit_block_mapping_key(emitter, event, true) + + case yaml_EMIT_BLOCK_MAPPING_KEY_STATE: + return yaml_emitter_emit_block_mapping_key(emitter, event, false) + + case yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE: + return yaml_emitter_emit_block_mapping_value(emitter, event, true) + + case yaml_EMIT_BLOCK_MAPPING_VALUE_STATE: + return yaml_emitter_emit_block_mapping_value(emitter, event, false) + + case yaml_EMIT_END_STATE: 
+ return yaml_emitter_set_emitter_error(emitter, "expected nothing after STREAM-END") + } + panic("invalid emitter state") +} + +// Expect STREAM-START. +func yaml_emitter_emit_stream_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if event.typ != yaml_STREAM_START_EVENT { + return yaml_emitter_set_emitter_error(emitter, "expected STREAM-START") + } + if emitter.encoding == yaml_ANY_ENCODING { + emitter.encoding = event.encoding + if emitter.encoding == yaml_ANY_ENCODING { + emitter.encoding = yaml_UTF8_ENCODING + } + } + if emitter.best_indent < 2 || emitter.best_indent > 9 { + emitter.best_indent = 2 + } + if emitter.best_width >= 0 && emitter.best_width <= emitter.best_indent*2 { + emitter.best_width = 80 + } + if emitter.best_width < 0 { + emitter.best_width = 1<<31 - 1 + } + if emitter.line_break == yaml_ANY_BREAK { + emitter.line_break = yaml_LN_BREAK + } + + emitter.indent = -1 + emitter.line = 0 + emitter.column = 0 + emitter.whitespace = true + emitter.indention = true + emitter.space_above = true + emitter.foot_indent = -1 + + if emitter.encoding != yaml_UTF8_ENCODING { + if !yaml_emitter_write_bom(emitter) { + return false + } + } + emitter.state = yaml_EMIT_FIRST_DOCUMENT_START_STATE + return true +} + +// Expect DOCUMENT-START or STREAM-END. +func yaml_emitter_emit_document_start(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + + if event.typ == yaml_DOCUMENT_START_EVENT { + + if event.version_directive != nil { + if !yaml_emitter_analyze_version_directive(emitter, event.version_directive) { + return false + } + } + + for i := 0; i < len(event.tag_directives); i++ { + tag_directive := &event.tag_directives[i] + if !yaml_emitter_analyze_tag_directive(emitter, tag_directive) { + return false + } + if !yaml_emitter_append_tag_directive(emitter, tag_directive, false) { + return false + } + } + + for i := 0; i < len(default_tag_directives); i++ { + tag_directive := &default_tag_directives[i] + if !yaml_emitter_append_tag_directive(emitter, tag_directive, true) { + return false + } + } + + implicit := event.implicit + if !first || emitter.canonical { + implicit = false + } + + if emitter.open_ended && (event.version_directive != nil || len(event.tag_directives) > 0) { + if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if event.version_directive != nil { + implicit = false + if !yaml_emitter_write_indicator(emitter, []byte("%YAML"), true, false, false) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte("1.1"), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if len(event.tag_directives) > 0 { + implicit = false + for i := 0; i < len(event.tag_directives); i++ { + tag_directive := &event.tag_directives[i] + if !yaml_emitter_write_indicator(emitter, []byte("%TAG"), true, false, false) { + return false + } + if !yaml_emitter_write_tag_handle(emitter, tag_directive.handle) { + return false + } + if !yaml_emitter_write_tag_content(emitter, tag_directive.prefix, true) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + } + + if yaml_emitter_check_empty_document(emitter) { + implicit = false + } + if !implicit { + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte("---"), true, false, false) { + return false + } + if emitter.canonical || true { + if 
!yaml_emitter_write_indent(emitter) { + return false + } + } + } + + if len(emitter.head_comment) > 0 { + if !yaml_emitter_process_head_comment(emitter) { + return false + } + if !put_break(emitter) { + return false + } + } + + emitter.state = yaml_EMIT_DOCUMENT_CONTENT_STATE + return true + } + + if event.typ == yaml_STREAM_END_EVENT { + if emitter.open_ended { + if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_flush(emitter) { + return false + } + emitter.state = yaml_EMIT_END_STATE + return true + } + + return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-START or STREAM-END") +} + +// yaml_emitter_increase_indent preserves the original signature and delegates to +// yaml_emitter_increase_indent_compact without compact-sequence indentation +func yaml_emitter_increase_indent(emitter *yaml_emitter_t, flow, indentless bool) bool { + return yaml_emitter_increase_indent_compact(emitter, flow, indentless, false) +} + +// yaml_emitter_process_line_comment preserves the original signature and delegates to +// yaml_emitter_process_line_comment_linebreak passing false for linebreak +func yaml_emitter_process_line_comment(emitter *yaml_emitter_t) bool { + return yaml_emitter_process_line_comment_linebreak(emitter, false) +} + +// Expect the root node. +func yaml_emitter_emit_document_content(emitter *yaml_emitter_t, event *yaml_event_t) bool { + emitter.states = append(emitter.states, yaml_EMIT_DOCUMENT_END_STATE) + + if !yaml_emitter_process_head_comment(emitter) { + return false + } + if !yaml_emitter_emit_node(emitter, event, true, false, false, false) { + return false + } + if !yaml_emitter_process_line_comment(emitter) { + return false + } + if !yaml_emitter_process_foot_comment(emitter) { + return false + } + return true +} + +// Expect DOCUMENT-END. +func yaml_emitter_emit_document_end(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if event.typ != yaml_DOCUMENT_END_EVENT { + return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-END") + } + // [Go] Force document foot separation. + emitter.foot_indent = 0 + if !yaml_emitter_process_foot_comment(emitter) { + return false + } + emitter.foot_indent = -1 + if !yaml_emitter_write_indent(emitter) { + return false + } + if !event.implicit { + // [Go] Allocate the slice elsewhere. + if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_flush(emitter) { + return false + } + emitter.state = yaml_EMIT_DOCUMENT_START_STATE + emitter.tag_directives = emitter.tag_directives[:0] + return true +} + +// Expect a flow item node. 
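+//
+// Illustrative note (not part of the original libyaml port): a flow sequence
+// is the bracketed form, e.g. [a, b, c]. A short one stays on a single line;
+// once the column passes best_width, the function below wraps the next item
+// onto an indented continuation line, roughly:
+//
+//	[first, second,
+//	  third]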
+func yaml_emitter_emit_flow_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first, trail bool) bool { + if first { + if !yaml_emitter_write_indicator(emitter, []byte{'['}, true, true, false) { + return false + } + if !yaml_emitter_increase_indent(emitter, true, false) { + return false + } + emitter.flow_level++ + } + + if event.typ == yaml_SEQUENCE_END_EVENT { + if emitter.canonical && !first && !trail { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + emitter.flow_level-- + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + if emitter.column == 0 || emitter.canonical && !first { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{']'}, false, false, false) { + return false + } + if !yaml_emitter_process_line_comment(emitter) { + return false + } + if !yaml_emitter_process_foot_comment(emitter) { + return false + } + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + + return true + } + + if !first && !trail { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + + if !yaml_emitter_process_head_comment(emitter) { + return false + } + if emitter.column == 0 { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if emitter.canonical || emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if len(emitter.line_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0 { + emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_TRAIL_ITEM_STATE) + } else { + emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE) + } + if !yaml_emitter_emit_node(emitter, event, false, true, false, false) { + return false + } + if len(emitter.line_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0 { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + if !yaml_emitter_process_line_comment(emitter) { + return false + } + if !yaml_emitter_process_foot_comment(emitter) { + return false + } + return true +} + +// Expect a flow key node. 
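+//
+// Illustrative note: a flow mapping is the braced form {k1: v1, k2: v2}.
+// Keys that pass yaml_emitter_check_simple_key below are written in the
+// compact "key: value" form; anything else falls back to the explicit "?"
+// marker, e.g. {? [a, b] : value}. Canonical output always uses the
+// explicit form.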
+func yaml_emitter_emit_flow_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first, trail bool) bool { + if first { + if !yaml_emitter_write_indicator(emitter, []byte{'{'}, true, true, false) { + return false + } + if !yaml_emitter_increase_indent(emitter, true, false) { + return false + } + emitter.flow_level++ + } + + if event.typ == yaml_MAPPING_END_EVENT { + if (emitter.canonical || len(emitter.head_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0) && !first && !trail { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + if !yaml_emitter_process_head_comment(emitter) { + return false + } + emitter.flow_level-- + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + if emitter.canonical && !first { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{'}'}, false, false, false) { + return false + } + if !yaml_emitter_process_line_comment(emitter) { + return false + } + if !yaml_emitter_process_foot_comment(emitter) { + return false + } + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true + } + + if !first && !trail { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + + if !yaml_emitter_process_head_comment(emitter) { + return false + } + + if emitter.column == 0 { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if emitter.canonical || emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if !emitter.canonical && yaml_emitter_check_simple_key(emitter) { + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, true) + } + if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, false) { + return false + } + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a flow value node. +func yaml_emitter_emit_flow_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool { + if simple { + if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) { + return false + } + } else { + if emitter.canonical || emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, false) { + return false + } + } + if len(emitter.line_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0 { + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_TRAIL_KEY_STATE) + } else { + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_KEY_STATE) + } + if !yaml_emitter_emit_node(emitter, event, false, false, true, false) { + return false + } + if len(emitter.line_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0 { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + if !yaml_emitter_process_line_comment(emitter) { + return false + } + if !yaml_emitter_process_foot_comment(emitter) { + return false + } + return true +} + +// Expect a block item node. 
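+//
+// For illustration, assuming best_indent = 2: with compact sequence
+// indentation enabled, the "- " indicator counts as indentation, so a
+// sequence nested under a mapping key renders as
+//
+//	key:
+//	- a
+//	- b
+//
+// whereas the regular form indents the items one extra level:
+//
+//	key:
+//	  - a
+//	  - b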
+func yaml_emitter_emit_block_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
+	if first {
+		// emitter.mapping_context tells us if we are currently in a mapping context.
+		// emitter.column tells us which column of the yaml output we are in; 0 is the first column.
+		// emitter.indention tells us if the last character was an indentation character.
+		// emitter.compact_sequence_indent tells us if '- ' is considered part of the indentation for sequence elements.
+		// So, `seq` means that we are in a mapping context, we are either at the first column or
+		// the last character was not an indentation character, and we consider '- ' part of the indentation
+		// for sequence elements.
+		seq := emitter.mapping_context && (emitter.column == 0 || !emitter.indention) &&
+			emitter.compact_sequence_indent
+		if !yaml_emitter_increase_indent_compact(emitter, false, false, seq) {
+			return false
+		}
+	}
+	if event.typ == yaml_SEQUENCE_END_EVENT {
+		emitter.indent = emitter.indents[len(emitter.indents)-1]
+		emitter.indents = emitter.indents[:len(emitter.indents)-1]
+		emitter.state = emitter.states[len(emitter.states)-1]
+		emitter.states = emitter.states[:len(emitter.states)-1]
+		return true
+	}
+	if !yaml_emitter_process_head_comment(emitter) {
+		return false
+	}
+	if !yaml_emitter_write_indent(emitter) {
+		return false
+	}
+	if !yaml_emitter_write_indicator(emitter, []byte{'-'}, true, false, true) {
+		return false
+	}
+	emitter.states = append(emitter.states, yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE)
+	if !yaml_emitter_emit_node(emitter, event, false, true, false, false) {
+		return false
+	}
+	if !yaml_emitter_process_line_comment(emitter) {
+		return false
+	}
+	if !yaml_emitter_process_foot_comment(emitter) {
+		return false
+	}
+	return true
+}
+
+// Expect a block key node.
+func yaml_emitter_emit_block_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
+	if first {
+		if !yaml_emitter_increase_indent(emitter, false, false) {
+			return false
+		}
+	}
+	if !yaml_emitter_process_head_comment(emitter) {
+		return false
+	}
+	if event.typ == yaml_MAPPING_END_EVENT {
+		emitter.indent = emitter.indents[len(emitter.indents)-1]
+		emitter.indents = emitter.indents[:len(emitter.indents)-1]
+		emitter.state = emitter.states[len(emitter.states)-1]
+		emitter.states = emitter.states[:len(emitter.states)-1]
+		return true
+	}
+	if !yaml_emitter_write_indent(emitter) {
+		return false
+	}
+	if len(emitter.line_comment) > 0 {
+		// [Go] A line comment was provided for the key. That's unusual as the
+		// scanner associates line comments with the value. Either way,
+		// save the line comment and render it appropriately later.
+		emitter.key_line_comment = emitter.line_comment
+		emitter.line_comment = nil
+	}
+	if yaml_emitter_check_simple_key(emitter) {
+		emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE)
+		return yaml_emitter_emit_node(emitter, event, false, false, true, true)
+	}
+	if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, true) {
+		return false
+	}
+	emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_VALUE_STATE)
+	return yaml_emitter_emit_node(emitter, event, false, false, true, false)
+}
+
+// Expect a block value node.
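+//
+// Sketch of the comment handling in the function below: when a mapping key
+// carried a line comment and the value is an indented block rather than a
+// scalar on the same line, the saved key_line_comment is written right after
+// the ':', so the output looks roughly like:
+//
+//	key: # comment that arrived attached to the key
+//	    - item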
+func yaml_emitter_emit_block_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool { + if simple { + if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) { + return false + } + } else { + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, true) { + return false + } + } + if len(emitter.key_line_comment) > 0 { + // [Go] Line comments are generally associated with the value, but when there's + // no value on the same line as a mapping key they end up attached to the + // key itself. + if event.typ == yaml_SCALAR_EVENT { + if len(emitter.line_comment) == 0 { + // A scalar is coming and it has no line comments by itself yet, + // so just let it handle the line comment as usual. If it has a + // line comment, we can't have both so the one from the key is lost. + emitter.line_comment = emitter.key_line_comment + emitter.key_line_comment = nil + } + } else if event.sequence_style() != yaml_FLOW_SEQUENCE_STYLE && (event.typ == yaml_MAPPING_START_EVENT || event.typ == yaml_SEQUENCE_START_EVENT) { + // An indented block follows, so write the comment right now. + emitter.line_comment, emitter.key_line_comment = emitter.key_line_comment, emitter.line_comment + if !yaml_emitter_process_line_comment(emitter) { + return false + } + emitter.line_comment, emitter.key_line_comment = emitter.key_line_comment, emitter.line_comment + } + } + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_KEY_STATE) + if !yaml_emitter_emit_node(emitter, event, false, false, true, false) { + return false + } + if !yaml_emitter_process_line_comment(emitter) { + return false + } + if !yaml_emitter_process_foot_comment(emitter) { + return false + } + return true +} + +func yaml_emitter_silent_nil_event(emitter *yaml_emitter_t, event *yaml_event_t) bool { + return event.typ == yaml_SCALAR_EVENT && event.implicit && !emitter.canonical && len(emitter.scalar_data.value) == 0 +} + +// Expect a node. +func yaml_emitter_emit_node(emitter *yaml_emitter_t, event *yaml_event_t, + root bool, sequence bool, mapping bool, simple_key bool) bool { + + emitter.root_context = root + emitter.sequence_context = sequence + emitter.mapping_context = mapping + emitter.simple_key_context = simple_key + + switch event.typ { + case yaml_ALIAS_EVENT: + return yaml_emitter_emit_alias(emitter, event) + case yaml_SCALAR_EVENT: + return yaml_emitter_emit_scalar(emitter, event) + case yaml_SEQUENCE_START_EVENT: + return yaml_emitter_emit_sequence_start(emitter, event) + case yaml_MAPPING_START_EVENT: + return yaml_emitter_emit_mapping_start(emitter, event) + default: + return yaml_emitter_set_emitter_error(emitter, + fmt.Sprintf("expected SCALAR, SEQUENCE-START, MAPPING-START, or ALIAS, but got %v", event.typ)) + } +} + +// Expect ALIAS. +func yaml_emitter_emit_alias(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_process_anchor(emitter) { + return false + } + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true +} + +// Expect SCALAR. 
+func yaml_emitter_emit_scalar(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_select_scalar_style(emitter, event) { + return false + } + if !yaml_emitter_process_anchor(emitter) { + return false + } + if !yaml_emitter_process_tag(emitter) { + return false + } + if !yaml_emitter_increase_indent(emitter, true, false) { + return false + } + if !yaml_emitter_process_scalar(emitter) { + return false + } + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true +} + +// Expect SEQUENCE-START. +func yaml_emitter_emit_sequence_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_process_anchor(emitter) { + return false + } + if !yaml_emitter_process_tag(emitter) { + return false + } + if emitter.flow_level > 0 || emitter.canonical || event.sequence_style() == yaml_FLOW_SEQUENCE_STYLE || + yaml_emitter_check_empty_sequence(emitter) { + emitter.state = yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE + } else { + emitter.state = yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE + } + return true +} + +// Expect MAPPING-START. +func yaml_emitter_emit_mapping_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_process_anchor(emitter) { + return false + } + if !yaml_emitter_process_tag(emitter) { + return false + } + if emitter.flow_level > 0 || emitter.canonical || event.mapping_style() == yaml_FLOW_MAPPING_STYLE || + yaml_emitter_check_empty_mapping(emitter) { + emitter.state = yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE + } else { + emitter.state = yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE + } + return true +} + +// Check if the document content is an empty scalar. +func yaml_emitter_check_empty_document(emitter *yaml_emitter_t) bool { + return false // [Go] Huh? +} + +// Check if the next events represent an empty sequence. +func yaml_emitter_check_empty_sequence(emitter *yaml_emitter_t) bool { + if len(emitter.events)-emitter.events_head < 2 { + return false + } + return emitter.events[emitter.events_head].typ == yaml_SEQUENCE_START_EVENT && + emitter.events[emitter.events_head+1].typ == yaml_SEQUENCE_END_EVENT +} + +// Check if the next events represent an empty mapping. +func yaml_emitter_check_empty_mapping(emitter *yaml_emitter_t) bool { + if len(emitter.events)-emitter.events_head < 2 { + return false + } + return emitter.events[emitter.events_head].typ == yaml_MAPPING_START_EVENT && + emitter.events[emitter.events_head+1].typ == yaml_MAPPING_END_EVENT +} + +// Check if the next node can be expressed as a simple key. 
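+//
+// For illustration: a simple key is single-line and short (the check below
+// caps anchor, tag, and value at 128 bytes combined), so it can be emitted as
+// "key: value". Longer or multiline keys use the explicit form instead:
+//
+//	? an overly long or multiline key
+//	: value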
+func yaml_emitter_check_simple_key(emitter *yaml_emitter_t) bool { + length := 0 + switch emitter.events[emitter.events_head].typ { + case yaml_ALIAS_EVENT: + length += len(emitter.anchor_data.anchor) + case yaml_SCALAR_EVENT: + if emitter.scalar_data.multiline { + return false + } + length += len(emitter.anchor_data.anchor) + + len(emitter.tag_data.handle) + + len(emitter.tag_data.suffix) + + len(emitter.scalar_data.value) + case yaml_SEQUENCE_START_EVENT: + if !yaml_emitter_check_empty_sequence(emitter) { + return false + } + length += len(emitter.anchor_data.anchor) + + len(emitter.tag_data.handle) + + len(emitter.tag_data.suffix) + case yaml_MAPPING_START_EVENT: + if !yaml_emitter_check_empty_mapping(emitter) { + return false + } + length += len(emitter.anchor_data.anchor) + + len(emitter.tag_data.handle) + + len(emitter.tag_data.suffix) + default: + return false + } + return length <= 128 +} + +// Determine an acceptable scalar style. +func yaml_emitter_select_scalar_style(emitter *yaml_emitter_t, event *yaml_event_t) bool { + + no_tag := len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 + if no_tag && !event.implicit && !event.quoted_implicit { + return yaml_emitter_set_emitter_error(emitter, "neither tag nor implicit flags are specified") + } + + style := event.scalar_style() + if style == yaml_ANY_SCALAR_STYLE { + style = yaml_PLAIN_SCALAR_STYLE + } + if emitter.canonical { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + if emitter.simple_key_context && emitter.scalar_data.multiline { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + + if style == yaml_PLAIN_SCALAR_STYLE { + if emitter.flow_level > 0 && !emitter.scalar_data.flow_plain_allowed || + emitter.flow_level == 0 && !emitter.scalar_data.block_plain_allowed { + style = yaml_SINGLE_QUOTED_SCALAR_STYLE + } + if len(emitter.scalar_data.value) == 0 && (emitter.flow_level > 0 || emitter.simple_key_context) { + style = yaml_SINGLE_QUOTED_SCALAR_STYLE + } + if no_tag && !event.implicit { + style = yaml_SINGLE_QUOTED_SCALAR_STYLE + } + } + if style == yaml_SINGLE_QUOTED_SCALAR_STYLE { + if !emitter.scalar_data.single_quoted_allowed { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + } + if style == yaml_LITERAL_SCALAR_STYLE || style == yaml_FOLDED_SCALAR_STYLE { + if !emitter.scalar_data.block_allowed || emitter.flow_level > 0 || emitter.simple_key_context { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + } + + if no_tag && !event.quoted_implicit && style != yaml_PLAIN_SCALAR_STYLE { + emitter.tag_data.handle = []byte{'!'} + } + emitter.scalar_data.style = style + return true +} + +// Write an anchor. +func yaml_emitter_process_anchor(emitter *yaml_emitter_t) bool { + if emitter.anchor_data.anchor == nil { + return true + } + c := []byte{'&'} + if emitter.anchor_data.alias { + c[0] = '*' + } + if !yaml_emitter_write_indicator(emitter, c, true, false, false) { + return false + } + return yaml_emitter_write_anchor(emitter, emitter.anchor_data.anchor) +} + +// Write a tag. +func yaml_emitter_process_tag(emitter *yaml_emitter_t) bool { + if len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 { + return true + } + if len(emitter.tag_data.handle) > 0 { + if !yaml_emitter_write_tag_handle(emitter, emitter.tag_data.handle) { + return false + } + if len(emitter.tag_data.suffix) > 0 { + if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) { + return false + } + } + } else { + // [Go] Allocate these slices elsewhere. 
+		if !yaml_emitter_write_indicator(emitter, []byte("!<"), true, false, false) {
+			return false
+		}
+		if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) {
+			return false
+		}
+		if !yaml_emitter_write_indicator(emitter, []byte{'>'}, false, false, false) {
+			return false
+		}
+	}
+	return true
+}
+
+// Write a scalar.
+func yaml_emitter_process_scalar(emitter *yaml_emitter_t) bool {
+	switch emitter.scalar_data.style {
+	case yaml_PLAIN_SCALAR_STYLE:
+		return yaml_emitter_write_plain_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context)
+
+	case yaml_SINGLE_QUOTED_SCALAR_STYLE:
+		return yaml_emitter_write_single_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context)
+
+	case yaml_DOUBLE_QUOTED_SCALAR_STYLE:
+		return yaml_emitter_write_double_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context)
+
+	case yaml_LITERAL_SCALAR_STYLE:
+		return yaml_emitter_write_literal_scalar(emitter, emitter.scalar_data.value)
+
+	case yaml_FOLDED_SCALAR_STYLE:
+		return yaml_emitter_write_folded_scalar(emitter, emitter.scalar_data.value)
+	}
+	panic("unknown scalar style")
+}
+
+// Write a head comment.
+func yaml_emitter_process_head_comment(emitter *yaml_emitter_t) bool {
+	if len(emitter.tail_comment) > 0 {
+		if !yaml_emitter_write_indent(emitter) {
+			return false
+		}
+		if !yaml_emitter_write_comment(emitter, emitter.tail_comment) {
+			return false
+		}
+		emitter.tail_comment = emitter.tail_comment[:0]
+		emitter.foot_indent = emitter.indent
+		if emitter.foot_indent < 0 {
+			emitter.foot_indent = 0
+		}
+	}
+
+	if len(emitter.head_comment) == 0 {
+		return true
+	}
+	if !yaml_emitter_write_indent(emitter) {
+		return false
+	}
+	if !yaml_emitter_write_comment(emitter, emitter.head_comment) {
+		return false
+	}
+	emitter.head_comment = emitter.head_comment[:0]
+	return true
+}
+
+// Write a line comment.
+func yaml_emitter_process_line_comment_linebreak(emitter *yaml_emitter_t, linebreak bool) bool {
+	if len(emitter.line_comment) == 0 {
+		// The next 3 lines are needed to resolve an issue with leading newlines
+		// See https://github.com/go-yaml/yaml/issues/755
+		// When linebreak is set to true, put_break will be called and will add
+		// the needed newline.
+		if linebreak && !put_break(emitter) {
+			return false
+		}
+		return true
+	}
+	if !emitter.whitespace {
+		if !put(emitter, ' ') {
+			return false
+		}
+	}
+	if !yaml_emitter_write_comment(emitter, emitter.line_comment) {
+		return false
+	}
+	emitter.line_comment = emitter.line_comment[:0]
+	return true
+}
+
+// Write a foot comment.
+func yaml_emitter_process_foot_comment(emitter *yaml_emitter_t) bool {
+	if len(emitter.foot_comment) == 0 {
+		return true
+	}
+	if !yaml_emitter_write_indent(emitter) {
+		return false
+	}
+	if !yaml_emitter_write_comment(emitter, emitter.foot_comment) {
+		return false
+	}
+	emitter.foot_comment = emitter.foot_comment[:0]
+	emitter.foot_indent = emitter.indent
+	if emitter.foot_indent < 0 {
+		emitter.foot_indent = 0
+	}
+	return true
+}
+
+// Check if a %YAML directive is valid.
+func yaml_emitter_analyze_version_directive(emitter *yaml_emitter_t, version_directive *yaml_version_directive_t) bool {
+	if version_directive.major != 1 || version_directive.minor != 1 {
+		return yaml_emitter_set_emitter_error(emitter, "incompatible %YAML directive")
+	}
+	return true
+}
+
+// Check if a %TAG directive is valid.
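+//
+// For reference, a directive that satisfies the checks below looks like
+//
+//	%TAG !e! tag:example.com,2000:app/
+//
+// the handle starts and ends with '!' with alphanumeric characters in
+// between, and the prefix is non-empty.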
+func yaml_emitter_analyze_tag_directive(emitter *yaml_emitter_t, tag_directive *yaml_tag_directive_t) bool { + handle := tag_directive.handle + prefix := tag_directive.prefix + if len(handle) == 0 { + return yaml_emitter_set_emitter_error(emitter, "tag handle must not be empty") + } + if handle[0] != '!' { + return yaml_emitter_set_emitter_error(emitter, "tag handle must start with '!'") + } + if handle[len(handle)-1] != '!' { + return yaml_emitter_set_emitter_error(emitter, "tag handle must end with '!'") + } + for i := 1; i < len(handle)-1; i += width(handle[i]) { + if !is_alpha(handle, i) { + return yaml_emitter_set_emitter_error(emitter, "tag handle must contain alphanumerical characters only") + } + } + if len(prefix) == 0 { + return yaml_emitter_set_emitter_error(emitter, "tag prefix must not be empty") + } + return true +} + +// Check if an anchor is valid. +func yaml_emitter_analyze_anchor(emitter *yaml_emitter_t, anchor []byte, alias bool) bool { + if len(anchor) == 0 { + problem := "anchor value must not be empty" + if alias { + problem = "alias value must not be empty" + } + return yaml_emitter_set_emitter_error(emitter, problem) + } + for i := 0; i < len(anchor); i += width(anchor[i]) { + if !is_alpha(anchor, i) { + problem := "anchor value must contain alphanumerical characters only" + if alias { + problem = "alias value must contain alphanumerical characters only" + } + return yaml_emitter_set_emitter_error(emitter, problem) + } + } + emitter.anchor_data.anchor = anchor + emitter.anchor_data.alias = alias + return true +} + +// Check if a tag is valid. +func yaml_emitter_analyze_tag(emitter *yaml_emitter_t, tag []byte) bool { + if len(tag) == 0 { + return yaml_emitter_set_emitter_error(emitter, "tag value must not be empty") + } + for i := 0; i < len(emitter.tag_directives); i++ { + tag_directive := &emitter.tag_directives[i] + if bytes.HasPrefix(tag, tag_directive.prefix) { + emitter.tag_data.handle = tag_directive.handle + emitter.tag_data.suffix = tag[len(tag_directive.prefix):] + return true + } + } + emitter.tag_data.suffix = tag + return true +} + +// Check if a scalar is valid. +func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool { + var ( + block_indicators = false + flow_indicators = false + line_breaks = false + special_characters = false + tab_characters = false + + leading_space = false + leading_break = false + trailing_space = false + trailing_break = false + break_space = false + space_break = false + + preceded_by_whitespace = false + followed_by_whitespace = false + previous_space = false + previous_break = false + ) + + emitter.scalar_data.value = value + + if len(value) == 0 { + emitter.scalar_data.multiline = false + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = true + emitter.scalar_data.single_quoted_allowed = true + emitter.scalar_data.block_allowed = false + return true + } + + if len(value) >= 3 && ((value[0] == '-' && value[1] == '-' && value[2] == '-') || (value[0] == '.' && value[1] == '.' 
&& value[2] == '.')) { + block_indicators = true + flow_indicators = true + } + + preceded_by_whitespace = true + for i, w := 0, 0; i < len(value); i += w { + w = width(value[i]) + followed_by_whitespace = i+w >= len(value) || is_blank(value, i+w) + + if i == 0 { + switch value[i] { + case '#', ',', '[', ']', '{', '}', '&', '*', '!', '|', '>', '\'', '"', '%', '@', '`': + flow_indicators = true + block_indicators = true + case '?', ':': + flow_indicators = true + if followed_by_whitespace { + block_indicators = true + } + case '-': + if followed_by_whitespace { + flow_indicators = true + block_indicators = true + } + } + } else { + switch value[i] { + case ',', '?', '[', ']', '{', '}': + flow_indicators = true + case ':': + flow_indicators = true + if followed_by_whitespace { + block_indicators = true + } + case '#': + if preceded_by_whitespace { + flow_indicators = true + block_indicators = true + } + } + } + + if value[i] == '\t' { + tab_characters = true + } else if !is_printable(value, i) || !is_ascii(value, i) && !emitter.unicode { + special_characters = true + } + if is_space(value, i) { + if i == 0 { + leading_space = true + } + if i+width(value[i]) == len(value) { + trailing_space = true + } + if previous_break { + break_space = true + } + previous_space = true + previous_break = false + } else if is_break(value, i) { + line_breaks = true + if i == 0 { + leading_break = true + } + if i+width(value[i]) == len(value) { + trailing_break = true + } + if previous_space { + space_break = true + } + previous_space = false + previous_break = true + } else { + previous_space = false + previous_break = false + } + + // [Go]: Why 'z'? Couldn't be the end of the string as that's the loop condition. + preceded_by_whitespace = is_blankz(value, i) + } + + emitter.scalar_data.multiline = line_breaks + emitter.scalar_data.flow_plain_allowed = true + emitter.scalar_data.block_plain_allowed = true + emitter.scalar_data.single_quoted_allowed = true + emitter.scalar_data.block_allowed = true + + if leading_space || leading_break || trailing_space || trailing_break { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + } + if trailing_space { + emitter.scalar_data.block_allowed = false + } + if break_space { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + emitter.scalar_data.single_quoted_allowed = false + } + if space_break || tab_characters || special_characters { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + emitter.scalar_data.single_quoted_allowed = false + } + if space_break || special_characters { + emitter.scalar_data.block_allowed = false + } + if line_breaks { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + } + if flow_indicators { + emitter.scalar_data.flow_plain_allowed = false + } + if block_indicators { + emitter.scalar_data.block_plain_allowed = false + } + return true +} + +// Check if the event data is valid. 
+func yaml_emitter_analyze_event(emitter *yaml_emitter_t, event *yaml_event_t) bool { + + emitter.anchor_data.anchor = nil + emitter.tag_data.handle = nil + emitter.tag_data.suffix = nil + emitter.scalar_data.value = nil + + if len(event.head_comment) > 0 { + emitter.head_comment = event.head_comment + } + if len(event.line_comment) > 0 { + emitter.line_comment = event.line_comment + } + if len(event.foot_comment) > 0 { + emitter.foot_comment = event.foot_comment + } + if len(event.tail_comment) > 0 { + emitter.tail_comment = event.tail_comment + } + + switch event.typ { + case yaml_ALIAS_EVENT: + if !yaml_emitter_analyze_anchor(emitter, event.anchor, true) { + return false + } + + case yaml_SCALAR_EVENT: + if len(event.anchor) > 0 { + if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { + return false + } + } + if len(event.tag) > 0 && (emitter.canonical || (!event.implicit && !event.quoted_implicit)) { + if !yaml_emitter_analyze_tag(emitter, event.tag) { + return false + } + } + if !yaml_emitter_analyze_scalar(emitter, event.value) { + return false + } + + case yaml_SEQUENCE_START_EVENT: + if len(event.anchor) > 0 { + if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { + return false + } + } + if len(event.tag) > 0 && (emitter.canonical || !event.implicit) { + if !yaml_emitter_analyze_tag(emitter, event.tag) { + return false + } + } + + case yaml_MAPPING_START_EVENT: + if len(event.anchor) > 0 { + if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { + return false + } + } + if len(event.tag) > 0 && (emitter.canonical || !event.implicit) { + if !yaml_emitter_analyze_tag(emitter, event.tag) { + return false + } + } + } + return true +} + +// Write the BOM character. +func yaml_emitter_write_bom(emitter *yaml_emitter_t) bool { + if !flush(emitter) { + return false + } + pos := emitter.buffer_pos + emitter.buffer[pos+0] = '\xEF' + emitter.buffer[pos+1] = '\xBB' + emitter.buffer[pos+2] = '\xBF' + emitter.buffer_pos += 3 + return true +} + +func yaml_emitter_write_indent(emitter *yaml_emitter_t) bool { + indent := emitter.indent + if indent < 0 { + indent = 0 + } + if !emitter.indention || emitter.column > indent || (emitter.column == indent && !emitter.whitespace) { + if !put_break(emitter) { + return false + } + } + if emitter.foot_indent == indent { + if !put_break(emitter) { + return false + } + } + for emitter.column < indent { + if !put(emitter, ' ') { + return false + } + } + emitter.whitespace = true + //emitter.indention = true + emitter.space_above = false + emitter.foot_indent = -1 + return true +} + +func yaml_emitter_write_indicator(emitter *yaml_emitter_t, indicator []byte, need_whitespace, is_whitespace, is_indention bool) bool { + if need_whitespace && !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + if !write_all(emitter, indicator) { + return false + } + emitter.whitespace = is_whitespace + emitter.indention = (emitter.indention && is_indention) + emitter.open_ended = false + return true +} + +func yaml_emitter_write_anchor(emitter *yaml_emitter_t, value []byte) bool { + if !write_all(emitter, value) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_tag_handle(emitter *yaml_emitter_t, value []byte) bool { + if !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + if !write_all(emitter, value) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func 
yaml_emitter_write_tag_content(emitter *yaml_emitter_t, value []byte, need_whitespace bool) bool { + if need_whitespace && !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + for i := 0; i < len(value); { + var must_write bool + switch value[i] { + case ';', '/', '?', ':', '@', '&', '=', '+', '$', ',', '_', '.', '~', '*', '\'', '(', ')', '[', ']': + must_write = true + default: + must_write = is_alpha(value, i) + } + if must_write { + if !write(emitter, value, &i) { + return false + } + } else { + w := width(value[i]) + for k := 0; k < w; k++ { + octet := value[i] + i++ + if !put(emitter, '%') { + return false + } + + c := octet >> 4 + if c < 10 { + c += '0' + } else { + c += 'A' - 10 + } + if !put(emitter, c) { + return false + } + + c = octet & 0x0f + if c < 10 { + c += '0' + } else { + c += 'A' - 10 + } + if !put(emitter, c) { + return false + } + } + } + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_plain_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { + if len(value) > 0 && !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + + spaces := false + breaks := false + for i := 0; i < len(value); { + if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && !is_space(value, i+1) { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + spaces = true + } else if is_break(value, i) { + if !breaks && value[i] == '\n' { + if !put_break(emitter) { + return false + } + } + if !write_break(emitter, value, &i) { + return false + } + //emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + spaces = false + breaks = false + } + } + + if len(value) > 0 { + emitter.whitespace = false + } + emitter.indention = false + if emitter.root_context { + emitter.open_ended = true + } + + return true +} + +func yaml_emitter_write_single_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { + + if !yaml_emitter_write_indicator(emitter, []byte{'\''}, true, false, false) { + return false + } + + spaces := false + breaks := false + for i := 0; i < len(value); { + if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 && !is_space(value, i+1) { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + spaces = true + } else if is_break(value, i) { + if !breaks && value[i] == '\n' { + if !put_break(emitter) { + return false + } + } + if !write_break(emitter, value, &i) { + return false + } + //emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if value[i] == '\'' { + if !put(emitter, '\'') { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + spaces = false + breaks = false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{'\''}, false, false, false) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_double_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { + spaces := false + if 
!yaml_emitter_write_indicator(emitter, []byte{'"'}, true, false, false) { + return false + } + + for i := 0; i < len(value); { + if !is_printable(value, i) || (!emitter.unicode && !is_ascii(value, i)) || + is_bom(value, i) || is_break(value, i) || + value[i] == '"' || value[i] == '\\' { + + octet := value[i] + + var w int + var v rune + switch { + case octet&0x80 == 0x00: + w, v = 1, rune(octet&0x7F) + case octet&0xE0 == 0xC0: + w, v = 2, rune(octet&0x1F) + case octet&0xF0 == 0xE0: + w, v = 3, rune(octet&0x0F) + case octet&0xF8 == 0xF0: + w, v = 4, rune(octet&0x07) + } + for k := 1; k < w; k++ { + octet = value[i+k] + v = (v << 6) + (rune(octet) & 0x3F) + } + i += w + + if !put(emitter, '\\') { + return false + } + + var ok bool + switch v { + case 0x00: + ok = put(emitter, '0') + case 0x07: + ok = put(emitter, 'a') + case 0x08: + ok = put(emitter, 'b') + case 0x09: + ok = put(emitter, 't') + case 0x0A: + ok = put(emitter, 'n') + case 0x0b: + ok = put(emitter, 'v') + case 0x0c: + ok = put(emitter, 'f') + case 0x0d: + ok = put(emitter, 'r') + case 0x1b: + ok = put(emitter, 'e') + case 0x22: + ok = put(emitter, '"') + case 0x5c: + ok = put(emitter, '\\') + case 0x85: + ok = put(emitter, 'N') + case 0xA0: + ok = put(emitter, '_') + case 0x2028: + ok = put(emitter, 'L') + case 0x2029: + ok = put(emitter, 'P') + default: + if v <= 0xFF { + ok = put(emitter, 'x') + w = 2 + } else if v <= 0xFFFF { + ok = put(emitter, 'u') + w = 4 + } else { + ok = put(emitter, 'U') + w = 8 + } + for k := (w - 1) * 4; ok && k >= 0; k -= 4 { + digit := byte((v >> uint(k)) & 0x0F) + if digit < 10 { + ok = put(emitter, digit+'0') + } else { + ok = put(emitter, digit+'A'-10) + } + } + } + if !ok { + return false + } + spaces = false + } else if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 { + if !yaml_emitter_write_indent(emitter) { + return false + } + if is_space(value, i+1) { + if !put(emitter, '\\') { + return false + } + } + i += width(value[i]) + } else if !write(emitter, value, &i) { + return false + } + spaces = true + } else { + if !write(emitter, value, &i) { + return false + } + spaces = false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{'"'}, false, false, false) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_block_scalar_hints(emitter *yaml_emitter_t, value []byte) bool { + if is_space(value, 0) || is_break(value, 0) { + indent_hint := []byte{'0' + byte(emitter.best_indent)} + if !yaml_emitter_write_indicator(emitter, indent_hint, false, false, false) { + return false + } + } + + emitter.open_ended = false + + var chomp_hint [1]byte + if len(value) == 0 { + chomp_hint[0] = '-' + } else { + i := len(value) - 1 + for value[i]&0xC0 == 0x80 { + i-- + } + if !is_break(value, i) { + chomp_hint[0] = '-' + } else if i == 0 { + chomp_hint[0] = '+' + emitter.open_ended = true + } else { + i-- + for value[i]&0xC0 == 0x80 { + i-- + } + if is_break(value, i) { + chomp_hint[0] = '+' + emitter.open_ended = true + } + } + } + if chomp_hint[0] != 0 { + if !yaml_emitter_write_indicator(emitter, chomp_hint[:], false, false, false) { + return false + } + } + return true +} + +func yaml_emitter_write_literal_scalar(emitter *yaml_emitter_t, value []byte) bool { + if !yaml_emitter_write_indicator(emitter, []byte{'|'}, true, false, false) { + return false + } + if !yaml_emitter_write_block_scalar_hints(emitter, value) { + return false + } + if 
!yaml_emitter_process_line_comment_linebreak(emitter, true) { + return false + } + //emitter.indention = true + emitter.whitespace = true + breaks := true + for i := 0; i < len(value); { + if is_break(value, i) { + if !write_break(emitter, value, &i) { + return false + } + //emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + breaks = false + } + } + + return true +} + +func yaml_emitter_write_folded_scalar(emitter *yaml_emitter_t, value []byte) bool { + if !yaml_emitter_write_indicator(emitter, []byte{'>'}, true, false, false) { + return false + } + if !yaml_emitter_write_block_scalar_hints(emitter, value) { + return false + } + if !yaml_emitter_process_line_comment_linebreak(emitter, true) { + return false + } + + //emitter.indention = true + emitter.whitespace = true + + breaks := true + leading_spaces := true + for i := 0; i < len(value); { + if is_break(value, i) { + if !breaks && !leading_spaces && value[i] == '\n' { + k := 0 + for is_break(value, k) { + k += width(value[k]) + } + if !is_blankz(value, k) { + if !put_break(emitter) { + return false + } + } + } + if !write_break(emitter, value, &i) { + return false + } + //emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + leading_spaces = is_blank(value, i) + } + if !breaks && is_space(value, i) && !is_space(value, i+1) && emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + emitter.indention = false + breaks = false + } + } + return true +} + +func yaml_emitter_write_comment(emitter *yaml_emitter_t, comment []byte) bool { + breaks := false + pound := false + for i := 0; i < len(comment); { + if is_break(comment, i) { + if !write_break(emitter, comment, &i) { + return false + } + //emitter.indention = true + breaks = true + pound = false + } else { + if breaks && !yaml_emitter_write_indent(emitter) { + return false + } + if !pound { + if comment[i] != '#' && (!put(emitter, '#') || !put(emitter, ' ')) { + return false + } + pound = true + } + if !write(emitter, comment, &i) { + return false + } + emitter.indention = false + breaks = false + } + } + if !breaks && !put_break(emitter) { + return false + } + + emitter.whitespace = true + //emitter.indention = true + return true +} diff --git a/vendor/go.yaml.in/yaml/v3/encode.go b/vendor/go.yaml.in/yaml/v3/encode.go new file mode 100644 index 0000000000..de9e72a3e6 --- /dev/null +++ b/vendor/go.yaml.in/yaml/v3/encode.go @@ -0,0 +1,577 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package yaml + +import ( + "encoding" + "fmt" + "io" + "reflect" + "regexp" + "sort" + "strconv" + "strings" + "time" + "unicode/utf8" +) + +type encoder struct { + emitter yaml_emitter_t + event yaml_event_t + out []byte + flow bool + indent int + doneInit bool +} + +func newEncoder() *encoder { + e := &encoder{} + yaml_emitter_initialize(&e.emitter) + yaml_emitter_set_output_string(&e.emitter, &e.out) + yaml_emitter_set_unicode(&e.emitter, true) + return e +} + +func newEncoderWithWriter(w io.Writer) *encoder { + e := &encoder{} + yaml_emitter_initialize(&e.emitter) + yaml_emitter_set_output_writer(&e.emitter, w) + yaml_emitter_set_unicode(&e.emitter, true) + return e +} + +func (e *encoder) init() { + if e.doneInit { + return + } + if e.indent == 0 { + e.indent = 4 + } + e.emitter.best_indent = e.indent + yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING) + e.emit() + e.doneInit = true +} + +func (e *encoder) finish() { + e.emitter.open_ended = false + yaml_stream_end_event_initialize(&e.event) + e.emit() +} + +func (e *encoder) destroy() { + yaml_emitter_delete(&e.emitter) +} + +func (e *encoder) emit() { + // This will internally delete the e.event value. + e.must(yaml_emitter_emit(&e.emitter, &e.event)) +} + +func (e *encoder) must(ok bool) { + if !ok { + msg := e.emitter.problem + if msg == "" { + msg = "unknown problem generating YAML content" + } + failf("%s", msg) + } +} + +func (e *encoder) marshalDoc(tag string, in reflect.Value) { + e.init() + var node *Node + if in.IsValid() { + node, _ = in.Interface().(*Node) + } + if node != nil && node.Kind == DocumentNode { + e.nodev(in) + } else { + yaml_document_start_event_initialize(&e.event, nil, nil, true) + e.emit() + e.marshal(tag, in) + yaml_document_end_event_initialize(&e.event, true) + e.emit() + } +} + +func (e *encoder) marshal(tag string, in reflect.Value) { + tag = shortTag(tag) + if !in.IsValid() || in.Kind() == reflect.Ptr && in.IsNil() { + e.nilv() + return + } + iface := in.Interface() + switch value := iface.(type) { + case *Node: + e.nodev(in) + return + case Node: + if !in.CanAddr() { + var n = reflect.New(in.Type()).Elem() + n.Set(in) + in = n + } + e.nodev(in.Addr()) + return + case time.Time: + e.timev(tag, in) + return + case *time.Time: + e.timev(tag, in.Elem()) + return + case time.Duration: + e.stringv(tag, reflect.ValueOf(value.String())) + return + case Marshaler: + v, err := value.MarshalYAML() + if err != nil { + fail(err) + } + if v == nil { + e.nilv() + return + } + e.marshal(tag, reflect.ValueOf(v)) + return + case encoding.TextMarshaler: + text, err := value.MarshalText() + if err != nil { + fail(err) + } + in = reflect.ValueOf(string(text)) + case nil: + e.nilv() + return + } + switch in.Kind() { + case reflect.Interface: + e.marshal(tag, in.Elem()) + case reflect.Map: + e.mapv(tag, in) + case reflect.Ptr: + e.marshal(tag, in.Elem()) + case reflect.Struct: + e.structv(tag, in) + case reflect.Slice, reflect.Array: + e.slicev(tag, in) + case reflect.String: + e.stringv(tag, in) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + e.intv(tag, in) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + e.uintv(tag, in) + case reflect.Float32, reflect.Float64: + e.floatv(tag, in) + case reflect.Bool: + e.boolv(tag, in) + default: + panic("cannot marshal type: " + in.Type().String()) + } +} + +func (e *encoder) mapv(tag string, in reflect.Value) { + e.mappingv(tag, func() { + keys := keyList(in.MapKeys()) + 
sort.Sort(keys)
+		for _, k := range keys {
+			e.marshal("", k)
+			e.marshal("", in.MapIndex(k))
+		}
+	})
+}
+
+func (e *encoder) fieldByIndex(v reflect.Value, index []int) (field reflect.Value) {
+	for _, num := range index {
+		for {
+			if v.Kind() == reflect.Ptr {
+				if v.IsNil() {
+					return reflect.Value{}
+				}
+				v = v.Elem()
+				continue
+			}
+			break
+		}
+		v = v.Field(num)
+	}
+	return v
+}
+
+func (e *encoder) structv(tag string, in reflect.Value) {
+	sinfo, err := getStructInfo(in.Type())
+	if err != nil {
+		panic(err)
+	}
+	e.mappingv(tag, func() {
+		for _, info := range sinfo.FieldsList {
+			var value reflect.Value
+			if info.Inline == nil {
+				value = in.Field(info.Num)
+			} else {
+				value = e.fieldByIndex(in, info.Inline)
+				if !value.IsValid() {
+					continue
+				}
+			}
+			if info.OmitEmpty && isZero(value) {
+				continue
+			}
+			e.marshal("", reflect.ValueOf(info.Key))
+			e.flow = info.Flow
+			e.marshal("", value)
+		}
+		if sinfo.InlineMap >= 0 {
+			m := in.Field(sinfo.InlineMap)
+			if m.Len() > 0 {
+				e.flow = false
+				keys := keyList(m.MapKeys())
+				sort.Sort(keys)
+				for _, k := range keys {
+					if _, found := sinfo.FieldsMap[k.String()]; found {
+						panic(fmt.Sprintf("cannot have key %q in inlined map: conflicts with struct field", k.String()))
+					}
+					e.marshal("", k)
+					e.flow = false
+					e.marshal("", m.MapIndex(k))
+				}
+			}
+		}
+	})
+}
+
+func (e *encoder) mappingv(tag string, f func()) {
+	implicit := tag == ""
+	style := yaml_BLOCK_MAPPING_STYLE
+	if e.flow {
+		e.flow = false
+		style = yaml_FLOW_MAPPING_STYLE
+	}
+	yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style)
+	e.emit()
+	f()
+	yaml_mapping_end_event_initialize(&e.event)
+	e.emit()
+}
+
+func (e *encoder) slicev(tag string, in reflect.Value) {
+	implicit := tag == ""
+	style := yaml_BLOCK_SEQUENCE_STYLE
+	if e.flow {
+		e.flow = false
+		style = yaml_FLOW_SEQUENCE_STYLE
+	}
+	e.must(yaml_sequence_start_event_initialize(&e.event, nil, []byte(tag), implicit, style))
+	e.emit()
+	n := in.Len()
+	for i := 0; i < n; i++ {
+		e.marshal("", in.Index(i))
+	}
+	e.must(yaml_sequence_end_event_initialize(&e.event))
+	e.emit()
+}
+
+// isBase60Float returns whether s is in base 60 notation as defined in YAML 1.1.
+//
+// The base 60 float notation in YAML 1.1 is a terrible idea and is unsupported
+// in YAML 1.2 and by this package, but these should be marshalled quoted for
+// the time being for compatibility with other parsers.
+func isBase60Float(s string) (result bool) {
+	// Fast path.
+	if s == "" {
+		return false
+	}
+	c := s[0]
+	if !(c == '+' || c == '-' || c >= '0' && c <= '9') || strings.IndexByte(s, ':') < 0 {
+		return false
+	}
+	// Do the full match.
+	return base60float.MatchString(s)
+}
+
+// From http://yaml.org/type/float.html, except the regular expression there
+// is bogus. In practice parsers do not enforce the "\.[0-9_]*" suffix.
+var base60float = regexp.MustCompile(`^[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+(?:\.[0-9_]*)?$`)
+
+// isOldBool returns whether s is bool notation as defined in YAML 1.1.
+//
+// We continue to force strings that YAML 1.1 would interpret as booleans to be
+// rendered as quoted strings so that the marshalled output is valid for YAML 1.1
+// parsing.
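+//
+// For example, the Go string "yes" is therefore emitted as the quoted scalar
+// "yes" rather than the bare word yes, which a YAML 1.1 parser would read as
+// the boolean true.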
+func isOldBool(s string) (result bool) {
+	switch s {
+	case "y", "Y", "yes", "Yes", "YES", "on", "On", "ON",
+		"n", "N", "no", "No", "NO", "off", "Off", "OFF":
+		return true
+	default:
+		return false
+	}
+}
+
+func (e *encoder) stringv(tag string, in reflect.Value) {
+	var style yaml_scalar_style_t
+	s := in.String()
+	canUsePlain := true
+	switch {
+	case !utf8.ValidString(s):
+		if tag == binaryTag {
+			failf("explicitly tagged !!binary data must be base64-encoded")
+		}
+		if tag != "" {
+			failf("cannot marshal invalid UTF-8 data as %s", shortTag(tag))
+		}
+		// It can't be encoded directly as YAML so use a binary tag
+		// and encode it as base64.
+		tag = binaryTag
+		s = encodeBase64(s)
+	case tag == "":
+		// Check to see if it would resolve to a specific
+		// tag when encoded unquoted. If it doesn't,
+		// there's no need to quote it.
+		rtag, _ := resolve("", s)
+		canUsePlain = rtag == strTag && !(isBase60Float(s) || isOldBool(s))
+	}
+	// Note: it's possible for user code to emit invalid YAML
+	// if they explicitly specify a tag and a string containing
+	// text that's incompatible with that tag.
+	switch {
+	case strings.Contains(s, "\n"):
+		if e.flow {
+			style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+		} else {
+			style = yaml_LITERAL_SCALAR_STYLE
+		}
+	case canUsePlain:
+		style = yaml_PLAIN_SCALAR_STYLE
+	default:
+		style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+	}
+	e.emitScalar(s, "", tag, style, nil, nil, nil, nil)
+}
+
+func (e *encoder) boolv(tag string, in reflect.Value) {
+	var s string
+	if in.Bool() {
+		s = "true"
+	} else {
+		s = "false"
+	}
+	e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil)
+}
+
+func (e *encoder) intv(tag string, in reflect.Value) {
+	s := strconv.FormatInt(in.Int(), 10)
+	e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil)
+}
+
+func (e *encoder) uintv(tag string, in reflect.Value) {
+	s := strconv.FormatUint(in.Uint(), 10)
+	e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil)
+}
+
+func (e *encoder) timev(tag string, in reflect.Value) {
+	t := in.Interface().(time.Time)
+	s := t.Format(time.RFC3339Nano)
+	e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil)
+}
+
+func (e *encoder) floatv(tag string, in reflect.Value) {
+	// Issue #352: When formatting, use the precision of the underlying value
+	precision := 64
+	if in.Kind() == reflect.Float32 {
+		precision = 32
+	}
+
+	s := strconv.FormatFloat(in.Float(), 'g', -1, precision)
+	switch s {
+	case "+Inf":
+		s = ".inf"
+	case "-Inf":
+		s = "-.inf"
+	case "NaN":
+		s = ".nan"
+	}
+	e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil)
+}
+
+func (e *encoder) nilv() {
+	e.emitScalar("null", "", "", yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil)
+}
+
+func (e *encoder) emitScalar(value, anchor, tag string, style yaml_scalar_style_t, head, line, foot, tail []byte) {
+	// TODO Kill this function. Replace all initialize calls by their underlying Go literals.
+	implicit := tag == ""
+	if !implicit {
+		tag = longTag(tag)
+	}
+	e.must(yaml_scalar_event_initialize(&e.event, []byte(anchor), []byte(tag), []byte(value), implicit, implicit, style))
+	e.event.head_comment = head
+	e.event.line_comment = line
+	e.event.foot_comment = foot
+	e.event.tail_comment = tail
+	e.emit()
+}
+
+func (e *encoder) nodev(in reflect.Value) {
+	e.node(in.Interface().(*Node), "")
+}
+
+func (e *encoder) node(node *Node, tail string) {
+	// Zero nodes behave as nil.
+	if node.Kind == 0 && node.IsZero() {
+		e.nilv()
+		return
+	}
+
+	// If the tag was not explicitly requested, and dropping it won't change the
+	// implicit tag of the value, don't include it in the presentation.
+	var tag = node.Tag
+	var stag = shortTag(tag)
+	var forceQuoting bool
+	if tag != "" && node.Style&TaggedStyle == 0 {
+		if node.Kind == ScalarNode {
+			if stag == strTag && node.Style&(SingleQuotedStyle|DoubleQuotedStyle|LiteralStyle|FoldedStyle) != 0 {
+				tag = ""
+			} else {
+				rtag, _ := resolve("", node.Value)
+				if rtag == stag {
+					tag = ""
+				} else if stag == strTag {
+					tag = ""
+					forceQuoting = true
+				}
+			}
+		} else {
+			var rtag string
+			switch node.Kind {
+			case MappingNode:
+				rtag = mapTag
+			case SequenceNode:
+				rtag = seqTag
+			}
+			if rtag == stag {
+				tag = ""
+			}
+		}
+	}
+
+	switch node.Kind {
+	case DocumentNode:
+		yaml_document_start_event_initialize(&e.event, nil, nil, true)
+		e.event.head_comment = []byte(node.HeadComment)
+		e.emit()
+		for _, node := range node.Content {
+			e.node(node, "")
+		}
+		yaml_document_end_event_initialize(&e.event, true)
+		e.event.foot_comment = []byte(node.FootComment)
+		e.emit()
+
+	case SequenceNode:
+		style := yaml_BLOCK_SEQUENCE_STYLE
+		if node.Style&FlowStyle != 0 {
+			style = yaml_FLOW_SEQUENCE_STYLE
+		}
+		e.must(yaml_sequence_start_event_initialize(&e.event, []byte(node.Anchor), []byte(longTag(tag)), tag == "", style))
+		e.event.head_comment = []byte(node.HeadComment)
+		e.emit()
+		for _, node := range node.Content {
+			e.node(node, "")
+		}
+		e.must(yaml_sequence_end_event_initialize(&e.event))
+		e.event.line_comment = []byte(node.LineComment)
+		e.event.foot_comment = []byte(node.FootComment)
+		e.emit()
+
+	case MappingNode:
+		style := yaml_BLOCK_MAPPING_STYLE
+		if node.Style&FlowStyle != 0 {
+			style = yaml_FLOW_MAPPING_STYLE
+		}
+		yaml_mapping_start_event_initialize(&e.event, []byte(node.Anchor), []byte(longTag(tag)), tag == "", style)
+		e.event.tail_comment = []byte(tail)
+		e.event.head_comment = []byte(node.HeadComment)
+		e.emit()
+
+		// The tail logic below moves the foot comment of prior keys to the following key,
+		// since the value for each key may be a nested structure and the foot needs to be
+		// processed only after the entirety of the value is streamed. The last tail is
+		// processed with the mapping end event.
+		var tail string
+		for i := 0; i+1 < len(node.Content); i += 2 {
+			k := node.Content[i]
+			foot := k.FootComment
+			if foot != "" {
+				kopy := *k
+				kopy.FootComment = ""
+				k = &kopy
+			}
+			e.node(k, tail)
+			tail = foot
+
+			v := node.Content[i+1]
+			e.node(v, "")
+		}
+
+		yaml_mapping_end_event_initialize(&e.event)
+		e.event.tail_comment = []byte(tail)
+		e.event.line_comment = []byte(node.LineComment)
+		e.event.foot_comment = []byte(node.FootComment)
+		e.emit()
+
+	case AliasNode:
+		yaml_alias_event_initialize(&e.event, []byte(node.Value))
+		e.event.head_comment = []byte(node.HeadComment)
+		e.event.line_comment = []byte(node.LineComment)
+		e.event.foot_comment = []byte(node.FootComment)
+		e.emit()
+
+	case ScalarNode:
+		value := node.Value
+		if !utf8.ValidString(value) {
+			if stag == binaryTag {
+				failf("explicitly tagged !!binary data must be base64-encoded")
+			}
+			if stag != "" {
+				failf("cannot marshal invalid UTF-8 data as %s", stag)
+			}
+			// It can't be encoded directly as YAML so use a binary tag
+			// and encode it as base64.
+ tag = binaryTag + value = encodeBase64(value) + } + + style := yaml_PLAIN_SCALAR_STYLE + switch { + case node.Style&DoubleQuotedStyle != 0: + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + case node.Style&SingleQuotedStyle != 0: + style = yaml_SINGLE_QUOTED_SCALAR_STYLE + case node.Style&LiteralStyle != 0: + style = yaml_LITERAL_SCALAR_STYLE + case node.Style&FoldedStyle != 0: + style = yaml_FOLDED_SCALAR_STYLE + case strings.Contains(value, "\n"): + style = yaml_LITERAL_SCALAR_STYLE + case forceQuoting: + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + + e.emitScalar(value, node.Anchor, tag, style, []byte(node.HeadComment), []byte(node.LineComment), []byte(node.FootComment), []byte(tail)) + default: + failf("cannot encode node with unknown kind %d", node.Kind) + } +} diff --git a/vendor/go.yaml.in/yaml/v3/parserc.go b/vendor/go.yaml.in/yaml/v3/parserc.go new file mode 100644 index 0000000000..25fe823637 --- /dev/null +++ b/vendor/go.yaml.in/yaml/v3/parserc.go @@ -0,0 +1,1274 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// Copyright (c) 2006-2010 Kirill Simonov +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +// of the Software, and to permit persons to whom the Software is furnished to do +// so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +package yaml + +import ( + "bytes" +) + +// The parser implements the following grammar: +// +// stream ::= STREAM-START implicit_document? explicit_document* STREAM-END +// implicit_document ::= block_node DOCUMENT-END* +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +// block_node_or_indentless_sequence ::= +// ALIAS +// | properties (block_content | indentless_block_sequence)? +// | block_content +// | indentless_block_sequence +// block_node ::= ALIAS +// | properties block_content? +// | block_content +// flow_node ::= ALIAS +// | properties flow_content? +// | flow_content +// properties ::= TAG ANCHOR? | ANCHOR TAG? +// block_content ::= block_collection | flow_collection | SCALAR +// flow_content ::= flow_collection | SCALAR +// block_collection ::= block_sequence | block_mapping +// flow_collection ::= flow_sequence | flow_mapping +// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END +// indentless_sequence ::= (BLOCK-ENTRY block_node?)+ +// block_mapping ::= BLOCK-MAPPING_START +// ((KEY block_node_or_indentless_sequence?)? +// (VALUE block_node_or_indentless_sequence?)?)* +// BLOCK-END +// flow_sequence ::= FLOW-SEQUENCE-START +// (flow_sequence_entry FLOW-ENTRY)* +// flow_sequence_entry? +// FLOW-SEQUENCE-END +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
+// flow_mapping ::= FLOW-MAPPING-START +// (flow_mapping_entry FLOW-ENTRY)* +// flow_mapping_entry? +// FLOW-MAPPING-END +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? + +// Peek the next token in the token queue. +func peek_token(parser *yaml_parser_t) *yaml_token_t { + if parser.token_available || yaml_parser_fetch_more_tokens(parser) { + token := &parser.tokens[parser.tokens_head] + yaml_parser_unfold_comments(parser, token) + return token + } + return nil +} + +// yaml_parser_unfold_comments walks through the comments queue and joins all +// comments behind the position of the provided token into the respective +// top-level comment slices in the parser. +func yaml_parser_unfold_comments(parser *yaml_parser_t, token *yaml_token_t) { + for parser.comments_head < len(parser.comments) && token.start_mark.index >= parser.comments[parser.comments_head].token_mark.index { + comment := &parser.comments[parser.comments_head] + if len(comment.head) > 0 { + if token.typ == yaml_BLOCK_END_TOKEN { + // No heads on ends, so keep comment.head for a follow up token. + break + } + if len(parser.head_comment) > 0 { + parser.head_comment = append(parser.head_comment, '\n') + } + parser.head_comment = append(parser.head_comment, comment.head...) + } + if len(comment.foot) > 0 { + if len(parser.foot_comment) > 0 { + parser.foot_comment = append(parser.foot_comment, '\n') + } + parser.foot_comment = append(parser.foot_comment, comment.foot...) + } + if len(comment.line) > 0 { + if len(parser.line_comment) > 0 { + parser.line_comment = append(parser.line_comment, '\n') + } + parser.line_comment = append(parser.line_comment, comment.line...) + } + *comment = yaml_comment_t{} + parser.comments_head++ + } +} + +// Remove the next token from the queue (must be called after peek_token). +func skip_token(parser *yaml_parser_t) { + parser.token_available = false + parser.tokens_parsed++ + parser.stream_end_produced = parser.tokens[parser.tokens_head].typ == yaml_STREAM_END_TOKEN + parser.tokens_head++ +} + +// Get the next event. +func yaml_parser_parse(parser *yaml_parser_t, event *yaml_event_t) bool { + // Erase the event object. + *event = yaml_event_t{} + + // No events after the end of the stream or error. + if parser.stream_end_produced || parser.error != yaml_NO_ERROR || parser.state == yaml_PARSE_END_STATE { + return true + } + + // Generate the next event. + return yaml_parser_state_machine(parser, event) +} + +// Set parser error. +func yaml_parser_set_parser_error(parser *yaml_parser_t, problem string, problem_mark yaml_mark_t) bool { + parser.error = yaml_PARSER_ERROR + parser.problem = problem + parser.problem_mark = problem_mark + return false +} + +func yaml_parser_set_parser_error_context(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string, problem_mark yaml_mark_t) bool { + parser.error = yaml_PARSER_ERROR + parser.context = context + parser.context_mark = context_mark + parser.problem = problem + parser.problem_mark = problem_mark + return false +} + +// State dispatcher. 
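yaml_parser_unfold_comments above is the hinge for comment support: the scanner queues comments, and every peek_token folds the ones behind the current token into parser.head_comment, line_comment, and foot_comment, from where they are attached to events and eventually to yaml.Node fields. Exactly which node a given comment ends up on depends on layout, so this sketch just walks the tree rather than asserting a position; the input is mine:

package main

import (
	"fmt"

	"go.yaml.in/yaml/v3"
)

const src = `# which port to bind
port: 8080 # keep in sync with the LB
`

func main() {
	var doc yaml.Node
	if err := yaml.Unmarshal([]byte(src), &doc); err != nil {
		panic(err)
	}
	m := doc.Content[0] // the top-level mapping
	key, val := m.Content[0], m.Content[1]
	for _, n := range []*yaml.Node{&doc, m, key, val} {
		if n.HeadComment != "" || n.LineComment != "" || n.FootComment != "" {
			fmt.Printf("%q head=%q line=%q foot=%q\n",
				n.Value, n.HeadComment, n.LineComment, n.FootComment)
		}
	}
}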
+func yaml_parser_state_machine(parser *yaml_parser_t, event *yaml_event_t) bool { + //trace("yaml_parser_state_machine", "state:", parser.state.String()) + + switch parser.state { + case yaml_PARSE_STREAM_START_STATE: + return yaml_parser_parse_stream_start(parser, event) + + case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE: + return yaml_parser_parse_document_start(parser, event, true) + + case yaml_PARSE_DOCUMENT_START_STATE: + return yaml_parser_parse_document_start(parser, event, false) + + case yaml_PARSE_DOCUMENT_CONTENT_STATE: + return yaml_parser_parse_document_content(parser, event) + + case yaml_PARSE_DOCUMENT_END_STATE: + return yaml_parser_parse_document_end(parser, event) + + case yaml_PARSE_BLOCK_NODE_STATE: + return yaml_parser_parse_node(parser, event, true, false) + + case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE: + return yaml_parser_parse_node(parser, event, true, true) + + case yaml_PARSE_FLOW_NODE_STATE: + return yaml_parser_parse_node(parser, event, false, false) + + case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE: + return yaml_parser_parse_block_sequence_entry(parser, event, true) + + case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE: + return yaml_parser_parse_block_sequence_entry(parser, event, false) + + case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE: + return yaml_parser_parse_indentless_sequence_entry(parser, event) + + case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE: + return yaml_parser_parse_block_mapping_key(parser, event, true) + + case yaml_PARSE_BLOCK_MAPPING_KEY_STATE: + return yaml_parser_parse_block_mapping_key(parser, event, false) + + case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE: + return yaml_parser_parse_block_mapping_value(parser, event) + + case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE: + return yaml_parser_parse_flow_sequence_entry(parser, event, true) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE: + return yaml_parser_parse_flow_sequence_entry(parser, event, false) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE: + return yaml_parser_parse_flow_sequence_entry_mapping_key(parser, event) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE: + return yaml_parser_parse_flow_sequence_entry_mapping_value(parser, event) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE: + return yaml_parser_parse_flow_sequence_entry_mapping_end(parser, event) + + case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE: + return yaml_parser_parse_flow_mapping_key(parser, event, true) + + case yaml_PARSE_FLOW_MAPPING_KEY_STATE: + return yaml_parser_parse_flow_mapping_key(parser, event, false) + + case yaml_PARSE_FLOW_MAPPING_VALUE_STATE: + return yaml_parser_parse_flow_mapping_value(parser, event, false) + + case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE: + return yaml_parser_parse_flow_mapping_value(parser, event, true) + + default: + panic("invalid parser state") + } +} + +// Parse the production: +// stream ::= STREAM-START implicit_document? 
explicit_document* STREAM-END
+//
+// ************
+func yaml_parser_parse_stream_start(parser *yaml_parser_t, event *yaml_event_t) bool {
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+	if token.typ != yaml_STREAM_START_TOKEN {
+		return yaml_parser_set_parser_error(parser, "did not find expected <stream-start>", token.start_mark)
+	}
+	parser.state = yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE
+	*event = yaml_event_t{
+		typ:        yaml_STREAM_START_EVENT,
+		start_mark: token.start_mark,
+		end_mark:   token.end_mark,
+		encoding:   token.encoding,
+	}
+	skip_token(parser)
+	return true
+}
+
+// Parse the productions:
+// implicit_document ::= block_node DOCUMENT-END*
+//
+// *
+//
+// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+//
+// *************************
+func yaml_parser_parse_document_start(parser *yaml_parser_t, event *yaml_event_t, implicit bool) bool {
+
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+
+	// Parse extra document end indicators.
+	if !implicit {
+		for token.typ == yaml_DOCUMENT_END_TOKEN {
+			skip_token(parser)
+			token = peek_token(parser)
+			if token == nil {
+				return false
+			}
+		}
+	}
+
+	if implicit && token.typ != yaml_VERSION_DIRECTIVE_TOKEN &&
+		token.typ != yaml_TAG_DIRECTIVE_TOKEN &&
+		token.typ != yaml_DOCUMENT_START_TOKEN &&
+		token.typ != yaml_STREAM_END_TOKEN {
+		// Parse an implicit document.
+		if !yaml_parser_process_directives(parser, nil, nil) {
+			return false
+		}
+		parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE)
+		parser.state = yaml_PARSE_BLOCK_NODE_STATE
+
+		var head_comment []byte
+		if len(parser.head_comment) > 0 {
+			// [Go] Scan the header comment backwards, and if an empty line is found, break
+			// the header so the part before the last empty line goes into the
+			// document header, while the bottom of it goes into a follow up event.
+			for i := len(parser.head_comment) - 1; i > 0; i-- {
+				if parser.head_comment[i] == '\n' {
+					if i == len(parser.head_comment)-1 {
+						head_comment = parser.head_comment[:i]
+						parser.head_comment = parser.head_comment[i+1:]
+						break
+					} else if parser.head_comment[i-1] == '\n' {
+						head_comment = parser.head_comment[:i-1]
+						parser.head_comment = parser.head_comment[i+1:]
+						break
+					}
+				}
+			}
+		}
+
+		*event = yaml_event_t{
+			typ:        yaml_DOCUMENT_START_EVENT,
+			start_mark: token.start_mark,
+			end_mark:   token.end_mark,
+
+			head_comment: head_comment,
+		}
+
+	} else if token.typ != yaml_STREAM_END_TOKEN {
+		// Parse an explicit document.
+		var version_directive *yaml_version_directive_t
+		var tag_directives []yaml_tag_directive_t
+		start_mark := token.start_mark
+		if !yaml_parser_process_directives(parser, &version_directive, &tag_directives) {
+			return false
+		}
+		token = peek_token(parser)
+		if token == nil {
+			return false
+		}
+		if token.typ != yaml_DOCUMENT_START_TOKEN {
+			yaml_parser_set_parser_error(parser,
+				"did not find expected <document start>", token.start_mark)
+			return false
+		}
+		parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE)
+		parser.state = yaml_PARSE_DOCUMENT_CONTENT_STATE
+		end_mark := token.end_mark
+
+		*event = yaml_event_t{
+			typ:               yaml_DOCUMENT_START_EVENT,
+			start_mark:        start_mark,
+			end_mark:          end_mark,
+			version_directive: version_directive,
+			tag_directives:    tag_directives,
+			implicit:          false,
+		}
+		skip_token(parser)
+
+	} else {
+		// Parse the stream end.
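yaml_parser_parse_document_start is also why one input may carry several documents: each --- opens a fresh DOCUMENT-START/DOCUMENT-END pair, and the public Decoder surfaces them as successive Decode calls that end with io.EOF. A sketch:

package main

import (
	"errors"
	"fmt"
	"io"
	"strings"

	"go.yaml.in/yaml/v3"
)

const stream = `name: first
---
name: second
`

func main() {
	dec := yaml.NewDecoder(strings.NewReader(stream))
	for {
		var doc map[string]string
		err := dec.Decode(&doc)
		if errors.Is(err, io.EOF) {
			break // STREAM-END reached
		}
		if err != nil {
			panic(err)
		}
		fmt.Println(doc["name"]) // prints "first", then "second"
	}
}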
+ parser.state = yaml_PARSE_END_STATE + *event = yaml_event_t{ + typ: yaml_STREAM_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + skip_token(parser) + } + + return true +} + +// Parse the productions: +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +// +// *********** +func yaml_parser_parse_document_content(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_VERSION_DIRECTIVE_TOKEN || + token.typ == yaml_TAG_DIRECTIVE_TOKEN || + token.typ == yaml_DOCUMENT_START_TOKEN || + token.typ == yaml_DOCUMENT_END_TOKEN || + token.typ == yaml_STREAM_END_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + return yaml_parser_process_empty_scalar(parser, event, + token.start_mark) + } + return yaml_parser_parse_node(parser, event, true, false) +} + +// Parse the productions: +// implicit_document ::= block_node DOCUMENT-END* +// +// ************* +// +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +func yaml_parser_parse_document_end(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + + start_mark := token.start_mark + end_mark := token.start_mark + + implicit := true + if token.typ == yaml_DOCUMENT_END_TOKEN { + end_mark = token.end_mark + skip_token(parser) + implicit = false + } + + parser.tag_directives = parser.tag_directives[:0] + + parser.state = yaml_PARSE_DOCUMENT_START_STATE + *event = yaml_event_t{ + typ: yaml_DOCUMENT_END_EVENT, + start_mark: start_mark, + end_mark: end_mark, + implicit: implicit, + } + yaml_parser_set_event_comments(parser, event) + if len(event.head_comment) > 0 && len(event.foot_comment) == 0 { + event.foot_comment = event.head_comment + event.head_comment = nil + } + return true +} + +func yaml_parser_set_event_comments(parser *yaml_parser_t, event *yaml_event_t) { + event.head_comment = parser.head_comment + event.line_comment = parser.line_comment + event.foot_comment = parser.foot_comment + parser.head_comment = nil + parser.line_comment = nil + parser.foot_comment = nil + parser.tail_comment = nil + parser.stem_comment = nil +} + +// Parse the productions: +// block_node_or_indentless_sequence ::= +// +// ALIAS +// ***** +// | properties (block_content | indentless_block_sequence)? +// ********** * +// | block_content | indentless_block_sequence +// * +// +// block_node ::= ALIAS +// +// ***** +// | properties block_content? +// ********** * +// | block_content +// * +// +// flow_node ::= ALIAS +// +// ***** +// | properties flow_content? +// ********** * +// | flow_content +// * +// +// properties ::= TAG ANCHOR? | ANCHOR TAG? 
+// +// ************************* +// +// block_content ::= block_collection | flow_collection | SCALAR +// +// ****** +// +// flow_content ::= flow_collection | SCALAR +// +// ****** +func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, block, indentless_sequence bool) bool { + //defer trace("yaml_parser_parse_node", "block:", block, "indentless_sequence:", indentless_sequence)() + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_ALIAS_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + *event = yaml_event_t{ + typ: yaml_ALIAS_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + anchor: token.value, + } + yaml_parser_set_event_comments(parser, event) + skip_token(parser) + return true + } + + start_mark := token.start_mark + end_mark := token.start_mark + + var tag_token bool + var tag_handle, tag_suffix, anchor []byte + var tag_mark yaml_mark_t + if token.typ == yaml_ANCHOR_TOKEN { + anchor = token.value + start_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_TAG_TOKEN { + tag_token = true + tag_handle = token.value + tag_suffix = token.suffix + tag_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + } else if token.typ == yaml_TAG_TOKEN { + tag_token = true + tag_handle = token.value + tag_suffix = token.suffix + start_mark = token.start_mark + tag_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_ANCHOR_TOKEN { + anchor = token.value + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + } + + var tag []byte + if tag_token { + if len(tag_handle) == 0 { + tag = tag_suffix + tag_suffix = nil + } else { + for i := range parser.tag_directives { + if bytes.Equal(parser.tag_directives[i].handle, tag_handle) { + tag = append([]byte(nil), parser.tag_directives[i].prefix...) + tag = append(tag, tag_suffix...) 
+ break + } + } + if len(tag) == 0 { + yaml_parser_set_parser_error_context(parser, + "while parsing a node", start_mark, + "found undefined tag handle", tag_mark) + return false + } + } + } + + implicit := len(tag) == 0 + if indentless_sequence && token.typ == yaml_BLOCK_ENTRY_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE), + } + return true + } + if token.typ == yaml_SCALAR_TOKEN { + var plain_implicit, quoted_implicit bool + end_mark = token.end_mark + if (len(tag) == 0 && token.style == yaml_PLAIN_SCALAR_STYLE) || (len(tag) == 1 && tag[0] == '!') { + plain_implicit = true + } else if len(tag) == 0 { + quoted_implicit = true + } + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + value: token.value, + implicit: plain_implicit, + quoted_implicit: quoted_implicit, + style: yaml_style_t(token.style), + } + yaml_parser_set_event_comments(parser, event) + skip_token(parser) + return true + } + if token.typ == yaml_FLOW_SEQUENCE_START_TOKEN { + // [Go] Some of the events below can be merged as they differ only on style. + end_mark = token.end_mark + parser.state = yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_FLOW_SEQUENCE_STYLE), + } + yaml_parser_set_event_comments(parser, event) + return true + } + if token.typ == yaml_FLOW_MAPPING_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_FLOW_MAPPING_STYLE), + } + yaml_parser_set_event_comments(parser, event) + return true + } + if block && token.typ == yaml_BLOCK_SEQUENCE_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE), + } + if parser.stem_comment != nil { + event.head_comment = parser.stem_comment + parser.stem_comment = nil + } + return true + } + if block && token.typ == yaml_BLOCK_MAPPING_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_MAPPING_STYLE), + } + if parser.stem_comment != nil { + event.head_comment = parser.stem_comment + parser.stem_comment = nil + } + return true + } + if len(anchor) > 0 || len(tag) > 0 { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + quoted_implicit: 
false, + style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE), + } + return true + } + + context := "while parsing a flow node" + if block { + context = "while parsing a block node" + } + yaml_parser_set_parser_error_context(parser, context, start_mark, + "did not find expected node content", token.start_mark) + return false +} + +// Parse the productions: +// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END +// +// ******************** *********** * ********* +func yaml_parser_parse_block_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + if token == nil { + return false + } + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_BLOCK_ENTRY_TOKEN { + mark := token.end_mark + prior_head_len := len(parser.head_comment) + skip_token(parser) + yaml_parser_split_stem_comment(parser, prior_head_len) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_BLOCK_ENTRY_TOKEN && token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, true, false) + } else { + parser.state = yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + } + if token.typ == yaml_BLOCK_END_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + + skip_token(parser) + return true + } + + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a block collection", context_mark, + "did not find expected '-' indicator", token.start_mark) +} + +// Parse the productions: +// indentless_sequence ::= (BLOCK-ENTRY block_node?)+ +// +// *********** * +func yaml_parser_parse_indentless_sequence_entry(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_BLOCK_ENTRY_TOKEN { + mark := token.end_mark + prior_head_len := len(parser.head_comment) + skip_token(parser) + yaml_parser_split_stem_comment(parser, prior_head_len) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_BLOCK_ENTRY_TOKEN && + token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, true, false) + } + parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.start_mark, // [Go] Shouldn't this be token.end_mark? + } + return true +} + +// Split stem comment from head comment. 
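The indentless-sequence states above encode a YAML quirk worth remembering: a block sequence that is the value of a mapping key may start its "-" entries at the key's own column, with no extra indentation. Both spellings below decode identically (a sketch; the struct is mine):

package main

import (
	"fmt"

	"go.yaml.in/yaml/v3"
)

// The first form is an "indentless" sequence: the entries are not
// indented relative to their key. The second is the fully indented form.
const indentless = "steps:\n- build\n- test\n"
const indented = "steps:\n  - build\n  - test\n"

func main() {
	var a, b struct{ Steps []string }
	if err := yaml.Unmarshal([]byte(indentless), &a); err != nil {
		panic(err)
	}
	if err := yaml.Unmarshal([]byte(indented), &b); err != nil {
		panic(err)
	}
	fmt.Println(a.Steps, b.Steps) // [build test] [build test]
}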
+// +// When a sequence or map is found under a sequence entry, the former head comment +// is assigned to the underlying sequence or map as a whole, not the individual +// sequence or map entry as would be expected otherwise. To handle this case the +// previous head comment is moved aside as the stem comment. +func yaml_parser_split_stem_comment(parser *yaml_parser_t, stem_len int) { + if stem_len == 0 { + return + } + + token := peek_token(parser) + if token == nil || token.typ != yaml_BLOCK_SEQUENCE_START_TOKEN && token.typ != yaml_BLOCK_MAPPING_START_TOKEN { + return + } + + parser.stem_comment = parser.head_comment[:stem_len] + if len(parser.head_comment) == stem_len { + parser.head_comment = nil + } else { + // Copy suffix to prevent very strange bugs if someone ever appends + // further bytes to the prefix in the stem_comment slice above. + parser.head_comment = append([]byte(nil), parser.head_comment[stem_len+1:]...) + } +} + +// Parse the productions: +// block_mapping ::= BLOCK-MAPPING_START +// +// ******************* +// ((KEY block_node_or_indentless_sequence?)? +// *** * +// (VALUE block_node_or_indentless_sequence?)?)* +// +// BLOCK-END +// ********* +func yaml_parser_parse_block_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + if token == nil { + return false + } + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + // [Go] A tail comment was left from the prior mapping value processed. Emit an event + // as it needs to be processed with that value and not the following key. + if len(parser.tail_comment) > 0 { + *event = yaml_event_t{ + typ: yaml_TAIL_COMMENT_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + foot_comment: parser.tail_comment, + } + parser.tail_comment = nil + return true + } + + if token.typ == yaml_KEY_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, true, true) + } else { + parser.state = yaml_PARSE_BLOCK_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + } else if token.typ == yaml_BLOCK_END_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + yaml_parser_set_event_comments(parser, event) + skip_token(parser) + return true + } + + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a block mapping", context_mark, + "did not find expected key", token.start_mark) +} + +// Parse the productions: +// block_mapping ::= BLOCK-MAPPING_START +// +// ((KEY block_node_or_indentless_sequence?)? 
+// +// (VALUE block_node_or_indentless_sequence?)?)* +// ***** * +// BLOCK-END +func yaml_parser_parse_block_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_VALUE_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_KEY_STATE) + return yaml_parser_parse_node(parser, event, true, true) + } + parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Parse the productions: +// flow_sequence ::= FLOW-SEQUENCE-START +// +// ******************* +// (flow_sequence_entry FLOW-ENTRY)* +// * ********** +// flow_sequence_entry? +// * +// FLOW-SEQUENCE-END +// ***************** +// +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// +// * +func yaml_parser_parse_flow_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + if token == nil { + return false + } + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + if !first { + if token.typ == yaml_FLOW_ENTRY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } else { + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a flow sequence", context_mark, + "did not find expected ',' or ']'", token.start_mark) + } + } + + if token.typ == yaml_KEY_TOKEN { + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + implicit: true, + style: yaml_style_t(yaml_FLOW_MAPPING_STYLE), + } + skip_token(parser) + return true + } else if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + yaml_parser_set_event_comments(parser, event) + + skip_token(parser) + return true +} + +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
+// +// *** * +func yaml_parser_parse_flow_sequence_entry_mapping_key(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_FLOW_ENTRY_TOKEN && + token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + mark := token.end_mark + skip_token(parser) + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) +} + +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// +// ***** * +func yaml_parser_parse_flow_sequence_entry_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_VALUE_TOKEN { + skip_token(parser) + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// +// * +func yaml_parser_parse_flow_sequence_entry_mapping_end(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.start_mark, // [Go] Shouldn't this be end_mark? + } + return true +} + +// Parse the productions: +// flow_mapping ::= FLOW-MAPPING-START +// +// ****************** +// (flow_mapping_entry FLOW-ENTRY)* +// * ********** +// flow_mapping_entry? +// ****************** +// FLOW-MAPPING-END +// **************** +// +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
+// - *** * +func yaml_parser_parse_flow_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ != yaml_FLOW_MAPPING_END_TOKEN { + if !first { + if token.typ == yaml_FLOW_ENTRY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } else { + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a flow mapping", context_mark, + "did not find expected ',' or '}'", token.start_mark) + } + } + + if token.typ == yaml_KEY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_FLOW_ENTRY_TOKEN && + token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } else { + parser.state = yaml_PARSE_FLOW_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) + } + } else if token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + yaml_parser_set_event_comments(parser, event) + skip_token(parser) + return true +} + +// Parse the productions: +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// - ***** * +func yaml_parser_parse_flow_mapping_value(parser *yaml_parser_t, event *yaml_event_t, empty bool) bool { + token := peek_token(parser) + if token == nil { + return false + } + if empty { + parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) + } + if token.typ == yaml_VALUE_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_KEY_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Generate an empty scalar event. +func yaml_parser_process_empty_scalar(parser *yaml_parser_t, event *yaml_event_t, mark yaml_mark_t) bool { + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + start_mark: mark, + end_mark: mark, + value: nil, // Empty + implicit: true, + style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE), + } + return true +} + +var default_tag_directives = []yaml_tag_directive_t{ + {[]byte("!"), []byte("!")}, + {[]byte("!!"), []byte("tag:yaml.org,2002:")}, +} + +// Parse directives. 
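default_tag_directives above is why ! and !! work with no directives in the input, and yaml_parser_process_directives lets a document rebind handles with %TAG (the expansion itself is the prefix+suffix concatenation in yaml_parser_parse_node). One way to watch it is to decode into a yaml.Node, whose Tag should keep the expanded form; the handle and prefix here are made up:

package main

import (
	"fmt"

	"go.yaml.in/yaml/v3"
)

// %TAG rebinds the !e! handle; the parser expands it using the
// directive before handing the tag to the resolver.
const src = `%TAG !e! tag:example.com,2002:
--- !e!widget
name: gear
`

func main() {
	var doc yaml.Node
	if err := yaml.Unmarshal([]byte(src), &doc); err != nil {
		panic(err)
	}
	fmt.Println(doc.Content[0].Tag) // tag:example.com,2002:widget
}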
+func yaml_parser_process_directives(parser *yaml_parser_t, + version_directive_ref **yaml_version_directive_t, + tag_directives_ref *[]yaml_tag_directive_t) bool { + + var version_directive *yaml_version_directive_t + var tag_directives []yaml_tag_directive_t + + token := peek_token(parser) + if token == nil { + return false + } + + for token.typ == yaml_VERSION_DIRECTIVE_TOKEN || token.typ == yaml_TAG_DIRECTIVE_TOKEN { + if token.typ == yaml_VERSION_DIRECTIVE_TOKEN { + if version_directive != nil { + yaml_parser_set_parser_error(parser, + "found duplicate %YAML directive", token.start_mark) + return false + } + if token.major != 1 || token.minor != 1 { + yaml_parser_set_parser_error(parser, + "found incompatible YAML document", token.start_mark) + return false + } + version_directive = &yaml_version_directive_t{ + major: token.major, + minor: token.minor, + } + } else if token.typ == yaml_TAG_DIRECTIVE_TOKEN { + value := yaml_tag_directive_t{ + handle: token.value, + prefix: token.prefix, + } + if !yaml_parser_append_tag_directive(parser, value, false, token.start_mark) { + return false + } + tag_directives = append(tag_directives, value) + } + + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + + for i := range default_tag_directives { + if !yaml_parser_append_tag_directive(parser, default_tag_directives[i], true, token.start_mark) { + return false + } + } + + if version_directive_ref != nil { + *version_directive_ref = version_directive + } + if tag_directives_ref != nil { + *tag_directives_ref = tag_directives + } + return true +} + +// Append a tag directive to the directives stack. +func yaml_parser_append_tag_directive(parser *yaml_parser_t, value yaml_tag_directive_t, allow_duplicates bool, mark yaml_mark_t) bool { + for i := range parser.tag_directives { + if bytes.Equal(value.handle, parser.tag_directives[i].handle) { + if allow_duplicates { + return true + } + return yaml_parser_set_parser_error(parser, "found duplicate %TAG directive", mark) + } + } + + // [Go] I suspect the copy is unnecessary. This was likely done + // because there was no way to track ownership of the data. + value_copy := yaml_tag_directive_t{ + handle: make([]byte, len(value.handle)), + prefix: make([]byte, len(value.prefix)), + } + copy(value_copy.handle, value.handle) + copy(value_copy.prefix, value.prefix) + parser.tag_directives = append(parser.tag_directives, value_copy) + return true +} diff --git a/vendor/go.yaml.in/yaml/v3/readerc.go b/vendor/go.yaml.in/yaml/v3/readerc.go new file mode 100644 index 0000000000..56af245366 --- /dev/null +++ b/vendor/go.yaml.in/yaml/v3/readerc.go @@ -0,0 +1,434 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// Copyright (c) 2006-2010 Kirill Simonov +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +// of the Software, and to permit persons to whom the Software is furnished to do +// so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. 
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +package yaml + +import ( + "io" +) + +// Set the reader error and return 0. +func yaml_parser_set_reader_error(parser *yaml_parser_t, problem string, offset int, value int) bool { + parser.error = yaml_READER_ERROR + parser.problem = problem + parser.problem_offset = offset + parser.problem_value = value + return false +} + +// Byte order marks. +const ( + bom_UTF8 = "\xef\xbb\xbf" + bom_UTF16LE = "\xff\xfe" + bom_UTF16BE = "\xfe\xff" +) + +// Determine the input stream encoding by checking the BOM symbol. If no BOM is +// found, the UTF-8 encoding is assumed. Return 1 on success, 0 on failure. +func yaml_parser_determine_encoding(parser *yaml_parser_t) bool { + // Ensure that we had enough bytes in the raw buffer. + for !parser.eof && len(parser.raw_buffer)-parser.raw_buffer_pos < 3 { + if !yaml_parser_update_raw_buffer(parser) { + return false + } + } + + // Determine the encoding. + buf := parser.raw_buffer + pos := parser.raw_buffer_pos + avail := len(buf) - pos + if avail >= 2 && buf[pos] == bom_UTF16LE[0] && buf[pos+1] == bom_UTF16LE[1] { + parser.encoding = yaml_UTF16LE_ENCODING + parser.raw_buffer_pos += 2 + parser.offset += 2 + } else if avail >= 2 && buf[pos] == bom_UTF16BE[0] && buf[pos+1] == bom_UTF16BE[1] { + parser.encoding = yaml_UTF16BE_ENCODING + parser.raw_buffer_pos += 2 + parser.offset += 2 + } else if avail >= 3 && buf[pos] == bom_UTF8[0] && buf[pos+1] == bom_UTF8[1] && buf[pos+2] == bom_UTF8[2] { + parser.encoding = yaml_UTF8_ENCODING + parser.raw_buffer_pos += 3 + parser.offset += 3 + } else { + parser.encoding = yaml_UTF8_ENCODING + } + return true +} + +// Update the raw buffer. +func yaml_parser_update_raw_buffer(parser *yaml_parser_t) bool { + size_read := 0 + + // Return if the raw buffer is full. + if parser.raw_buffer_pos == 0 && len(parser.raw_buffer) == cap(parser.raw_buffer) { + return true + } + + // Return on EOF. + if parser.eof { + return true + } + + // Move the remaining bytes in the raw buffer to the beginning. + if parser.raw_buffer_pos > 0 && parser.raw_buffer_pos < len(parser.raw_buffer) { + copy(parser.raw_buffer, parser.raw_buffer[parser.raw_buffer_pos:]) + } + parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)-parser.raw_buffer_pos] + parser.raw_buffer_pos = 0 + + // Call the read handler to fill the buffer. + size_read, err := parser.read_handler(parser, parser.raw_buffer[len(parser.raw_buffer):cap(parser.raw_buffer)]) + parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)+size_read] + if err == io.EOF { + parser.eof = true + } else if err != nil { + return yaml_parser_set_reader_error(parser, "input error: "+err.Error(), parser.offset, -1) + } + return true +} + +// Ensure that the buffer contains at least `length` characters. +// Return true on success, false on failure. +// +// The length is supposed to be significantly less that the buffer size. 
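Because yaml_parser_determine_encoding consumes the BOM and the update loop transcodes as it reads, callers can hand the decoder UTF-16 input directly. A sketch that builds the same document in UTF-16LE by hand (the helper function is mine):

package main

import (
	"fmt"
	"unicode/utf16"

	"go.yaml.in/yaml/v3"
)

func utf16le(s string) []byte {
	buf := []byte{0xff, 0xfe} // UTF-16LE byte order mark
	for _, u := range utf16.Encode([]rune(s)) {
		buf = append(buf, byte(u), byte(u>>8))
	}
	return buf
}

func main() {
	var m map[string]int
	if err := yaml.Unmarshal(utf16le("answer: 42\n"), &m); err != nil {
		panic(err)
	}
	fmt.Println(m["answer"]) // 42
}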
+func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool { + if parser.read_handler == nil { + panic("read handler must be set") + } + + // [Go] This function was changed to guarantee the requested length size at EOF. + // The fact we need to do this is pretty awful, but the description above implies + // for that to be the case, and there are tests + + // If the EOF flag is set and the raw buffer is empty, do nothing. + if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) { + // [Go] ACTUALLY! Read the documentation of this function above. + // This is just broken. To return true, we need to have the + // given length in the buffer. Not doing that means every single + // check that calls this function to make sure the buffer has a + // given length is Go) panicking; or C) accessing invalid memory. + //return true + } + + // Return if the buffer contains enough characters. + if parser.unread >= length { + return true + } + + // Determine the input encoding if it is not known yet. + if parser.encoding == yaml_ANY_ENCODING { + if !yaml_parser_determine_encoding(parser) { + return false + } + } + + // Move the unread characters to the beginning of the buffer. + buffer_len := len(parser.buffer) + if parser.buffer_pos > 0 && parser.buffer_pos < buffer_len { + copy(parser.buffer, parser.buffer[parser.buffer_pos:]) + buffer_len -= parser.buffer_pos + parser.buffer_pos = 0 + } else if parser.buffer_pos == buffer_len { + buffer_len = 0 + parser.buffer_pos = 0 + } + + // Open the whole buffer for writing, and cut it before returning. + parser.buffer = parser.buffer[:cap(parser.buffer)] + + // Fill the buffer until it has enough characters. + first := true + for parser.unread < length { + + // Fill the raw buffer if necessary. + if !first || parser.raw_buffer_pos == len(parser.raw_buffer) { + if !yaml_parser_update_raw_buffer(parser) { + parser.buffer = parser.buffer[:buffer_len] + return false + } + } + first = false + + // Decode the raw buffer. + inner: + for parser.raw_buffer_pos != len(parser.raw_buffer) { + var value rune + var width int + + raw_unread := len(parser.raw_buffer) - parser.raw_buffer_pos + + // Decode the next character. + switch parser.encoding { + case yaml_UTF8_ENCODING: + // Decode a UTF-8 character. Check RFC 3629 + // (http://www.ietf.org/rfc/rfc3629.txt) for more details. + // + // The following table (taken from the RFC) is used for + // decoding. + // + // Char. number range | UTF-8 octet sequence + // (hexadecimal) | (binary) + // --------------------+------------------------------------ + // 0000 0000-0000 007F | 0xxxxxxx + // 0000 0080-0000 07FF | 110xxxxx 10xxxxxx + // 0000 0800-0000 FFFF | 1110xxxx 10xxxxxx 10xxxxxx + // 0001 0000-0010 FFFF | 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx + // + // Additionally, the characters in the range 0xD800-0xDFFF + // are prohibited as they are reserved for use with UTF-16 + // surrogate pairs. + + // Determine the length of the UTF-8 sequence. + octet := parser.raw_buffer[parser.raw_buffer_pos] + switch { + case octet&0x80 == 0x00: + width = 1 + case octet&0xE0 == 0xC0: + width = 2 + case octet&0xF0 == 0xE0: + width = 3 + case octet&0xF8 == 0xF0: + width = 4 + default: + // The leading octet is invalid. + return yaml_parser_set_reader_error(parser, + "invalid leading UTF-8 octet", + parser.offset, int(octet)) + } + + // Check if the raw buffer contains an incomplete character. 
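The leading-octet switch above is the usual RFC 3629 classification: the count of leading 1 bits in the first byte gives the sequence length. The standard library agrees with it, which makes for an easy standalone cross-check (this sketch is separate from the vendored code):

package main

import (
	"fmt"
	"unicode/utf8"
)

// sequenceWidth mirrors the leading-octet switch in the reader.
func sequenceWidth(octet byte) int {
	switch {
	case octet&0x80 == 0x00:
		return 1
	case octet&0xE0 == 0xC0:
		return 2
	case octet&0xF0 == 0xE0:
		return 3
	case octet&0xF8 == 0xF0:
		return 4
	}
	return 0 // invalid leading octet, e.g. a stray continuation byte
}

func main() {
	for _, s := range []string{"A", "é", "€", "😀"} {
		b := []byte(s)
		r, size := utf8.DecodeRune(b)
		fmt.Printf("%q leading=0x%02X width=%d (stdlib says %d, rune U+%04X)\n",
			s, b[0], sequenceWidth(b[0]), size, r)
	}
}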
+ if width > raw_unread { + if parser.eof { + return yaml_parser_set_reader_error(parser, + "incomplete UTF-8 octet sequence", + parser.offset, -1) + } + break inner + } + + // Decode the leading octet. + switch { + case octet&0x80 == 0x00: + value = rune(octet & 0x7F) + case octet&0xE0 == 0xC0: + value = rune(octet & 0x1F) + case octet&0xF0 == 0xE0: + value = rune(octet & 0x0F) + case octet&0xF8 == 0xF0: + value = rune(octet & 0x07) + default: + value = 0 + } + + // Check and decode the trailing octets. + for k := 1; k < width; k++ { + octet = parser.raw_buffer[parser.raw_buffer_pos+k] + + // Check if the octet is valid. + if (octet & 0xC0) != 0x80 { + return yaml_parser_set_reader_error(parser, + "invalid trailing UTF-8 octet", + parser.offset+k, int(octet)) + } + + // Decode the octet. + value = (value << 6) + rune(octet&0x3F) + } + + // Check the length of the sequence against the value. + switch { + case width == 1: + case width == 2 && value >= 0x80: + case width == 3 && value >= 0x800: + case width == 4 && value >= 0x10000: + default: + return yaml_parser_set_reader_error(parser, + "invalid length of a UTF-8 sequence", + parser.offset, -1) + } + + // Check the range of the value. + if value >= 0xD800 && value <= 0xDFFF || value > 0x10FFFF { + return yaml_parser_set_reader_error(parser, + "invalid Unicode character", + parser.offset, int(value)) + } + + case yaml_UTF16LE_ENCODING, yaml_UTF16BE_ENCODING: + var low, high int + if parser.encoding == yaml_UTF16LE_ENCODING { + low, high = 0, 1 + } else { + low, high = 1, 0 + } + + // The UTF-16 encoding is not as simple as one might + // naively think. Check RFC 2781 + // (http://www.ietf.org/rfc/rfc2781.txt). + // + // Normally, two subsequent bytes describe a Unicode + // character. However a special technique (called a + // surrogate pair) is used for specifying character + // values larger than 0xFFFF. + // + // A surrogate pair consists of two pseudo-characters: + // high surrogate area (0xD800-0xDBFF) + // low surrogate area (0xDC00-0xDFFF) + // + // The following formulas are used for decoding + // and encoding characters using surrogate pairs: + // + // U = U' + 0x10000 (0x01 00 00 <= U <= 0x10 FF FF) + // U' = yyyyyyyyyyxxxxxxxxxx (0 <= U' <= 0x0F FF FF) + // W1 = 110110yyyyyyyyyy + // W2 = 110111xxxxxxxxxx + // + // where U is the character value, W1 is the high surrogate + // area, W2 is the low surrogate area. + + // Check for incomplete UTF-16 character. + if raw_unread < 2 { + if parser.eof { + return yaml_parser_set_reader_error(parser, + "incomplete UTF-16 character", + parser.offset, -1) + } + break inner + } + + // Get the character. + value = rune(parser.raw_buffer[parser.raw_buffer_pos+low]) + + (rune(parser.raw_buffer[parser.raw_buffer_pos+high]) << 8) + + // Check for unexpected low surrogate area. + if value&0xFC00 == 0xDC00 { + return yaml_parser_set_reader_error(parser, + "unexpected low surrogate area", + parser.offset, int(value)) + } + + // Check for a high surrogate area. + if value&0xFC00 == 0xD800 { + width = 4 + + // Check for incomplete surrogate pair. + if raw_unread < 4 { + if parser.eof { + return yaml_parser_set_reader_error(parser, + "incomplete UTF-16 surrogate pair", + parser.offset, -1) + } + break inner + } + + // Get the next character. + value2 := rune(parser.raw_buffer[parser.raw_buffer_pos+low+2]) + + (rune(parser.raw_buffer[parser.raw_buffer_pos+high+2]) << 8) + + // Check for a low surrogate area. 
+ if value2&0xFC00 != 0xDC00 { + return yaml_parser_set_reader_error(parser, + "expected low surrogate area", + parser.offset+2, int(value2)) + } + + // Generate the value of the surrogate pair. + value = 0x10000 + ((value & 0x3FF) << 10) + (value2 & 0x3FF) + } else { + width = 2 + } + + default: + panic("impossible") + } + + // Check if the character is in the allowed range: + // #x9 | #xA | #xD | [#x20-#x7E] (8 bit) + // | #x85 | [#xA0-#xD7FF] | [#xE000-#xFFFD] (16 bit) + // | [#x10000-#x10FFFF] (32 bit) + switch { + case value == 0x09: + case value == 0x0A: + case value == 0x0D: + case value >= 0x20 && value <= 0x7E: + case value == 0x85: + case value >= 0xA0 && value <= 0xD7FF: + case value >= 0xE000 && value <= 0xFFFD: + case value >= 0x10000 && value <= 0x10FFFF: + default: + return yaml_parser_set_reader_error(parser, + "control characters are not allowed", + parser.offset, int(value)) + } + + // Move the raw pointers. + parser.raw_buffer_pos += width + parser.offset += width + + // Finally put the character into the buffer. + if value <= 0x7F { + // 0000 0000-0000 007F . 0xxxxxxx + parser.buffer[buffer_len+0] = byte(value) + buffer_len += 1 + } else if value <= 0x7FF { + // 0000 0080-0000 07FF . 110xxxxx 10xxxxxx + parser.buffer[buffer_len+0] = byte(0xC0 + (value >> 6)) + parser.buffer[buffer_len+1] = byte(0x80 + (value & 0x3F)) + buffer_len += 2 + } else if value <= 0xFFFF { + // 0000 0800-0000 FFFF . 1110xxxx 10xxxxxx 10xxxxxx + parser.buffer[buffer_len+0] = byte(0xE0 + (value >> 12)) + parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 6) & 0x3F)) + parser.buffer[buffer_len+2] = byte(0x80 + (value & 0x3F)) + buffer_len += 3 + } else { + // 0001 0000-0010 FFFF . 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx + parser.buffer[buffer_len+0] = byte(0xF0 + (value >> 18)) + parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 12) & 0x3F)) + parser.buffer[buffer_len+2] = byte(0x80 + ((value >> 6) & 0x3F)) + parser.buffer[buffer_len+3] = byte(0x80 + (value & 0x3F)) + buffer_len += 4 + } + + parser.unread++ + } + + // On EOF, put NUL into the buffer and return. + if parser.eof { + parser.buffer[buffer_len] = 0 + buffer_len++ + parser.unread++ + break + } + } + // [Go] Read the documentation of this function above. To return true, + // we need to have the given length in the buffer. Not doing that means + // every single check that calls this function to make sure the buffer + // has a given length is Go) panicking; or C) accessing invalid memory. + // This happens here due to the EOF above breaking early. + for buffer_len < length { + parser.buffer[buffer_len] = 0 + buffer_len++ + } + parser.buffer = parser.buffer[:buffer_len] + return true +} diff --git a/vendor/go.yaml.in/yaml/v3/resolve.go b/vendor/go.yaml.in/yaml/v3/resolve.go new file mode 100644 index 0000000000..64ae888057 --- /dev/null +++ b/vendor/go.yaml.in/yaml/v3/resolve.go @@ -0,0 +1,326 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
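resolve below is the type inference applied to untagged plain scalars, and its table is deliberately YAML 1.2-leaning: only the true/false spellings are booleans, so 1.1-era values like yes and on come back as plain strings (the encoder compensates by quoting them on output, via isOldBool). A sketch of the visible behavior; the sample keys are mine:

package main

import (
	"fmt"

	"go.yaml.in/yaml/v3"
)

const src = `a: true
b: yes
c: 0x1F
d: 1.5
e: ~
`

func main() {
	var m map[string]interface{}
	if err := yaml.Unmarshal([]byte(src), &m); err != nil {
		panic(err)
	}
	for _, k := range []string{"a", "b", "c", "d", "e"} {
		fmt.Printf("%s: %T = %v\n", k, m[k], m[k])
	}
	// a: bool = true
	// b: string = yes   (a YAML 1.1 bool, treated as a string here)
	// c: int = 31       (the 0x prefix is parsed with base auto-detection)
	// d: float64 = 1.5
	// e: <nil> = <nil>
}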
+ +package yaml + +import ( + "encoding/base64" + "math" + "regexp" + "strconv" + "strings" + "time" +) + +type resolveMapItem struct { + value interface{} + tag string +} + +var resolveTable = make([]byte, 256) +var resolveMap = make(map[string]resolveMapItem) + +func init() { + t := resolveTable + t[int('+')] = 'S' // Sign + t[int('-')] = 'S' + for _, c := range "0123456789" { + t[int(c)] = 'D' // Digit + } + for _, c := range "yYnNtTfFoO~" { + t[int(c)] = 'M' // In map + } + t[int('.')] = '.' // Float (potentially in map) + + var resolveMapList = []struct { + v interface{} + tag string + l []string + }{ + {true, boolTag, []string{"true", "True", "TRUE"}}, + {false, boolTag, []string{"false", "False", "FALSE"}}, + {nil, nullTag, []string{"", "~", "null", "Null", "NULL"}}, + {math.NaN(), floatTag, []string{".nan", ".NaN", ".NAN"}}, + {math.Inf(+1), floatTag, []string{".inf", ".Inf", ".INF"}}, + {math.Inf(+1), floatTag, []string{"+.inf", "+.Inf", "+.INF"}}, + {math.Inf(-1), floatTag, []string{"-.inf", "-.Inf", "-.INF"}}, + {"<<", mergeTag, []string{"<<"}}, + } + + m := resolveMap + for _, item := range resolveMapList { + for _, s := range item.l { + m[s] = resolveMapItem{item.v, item.tag} + } + } +} + +const ( + nullTag = "!!null" + boolTag = "!!bool" + strTag = "!!str" + intTag = "!!int" + floatTag = "!!float" + timestampTag = "!!timestamp" + seqTag = "!!seq" + mapTag = "!!map" + binaryTag = "!!binary" + mergeTag = "!!merge" +) + +var longTags = make(map[string]string) +var shortTags = make(map[string]string) + +func init() { + for _, stag := range []string{nullTag, boolTag, strTag, intTag, floatTag, timestampTag, seqTag, mapTag, binaryTag, mergeTag} { + ltag := longTag(stag) + longTags[stag] = ltag + shortTags[ltag] = stag + } +} + +const longTagPrefix = "tag:yaml.org,2002:" + +func shortTag(tag string) string { + if strings.HasPrefix(tag, longTagPrefix) { + if stag, ok := shortTags[tag]; ok { + return stag + } + return "!!" + tag[len(longTagPrefix):] + } + return tag +} + +func longTag(tag string) string { + if strings.HasPrefix(tag, "!!") { + if ltag, ok := longTags[tag]; ok { + return ltag + } + return longTagPrefix + tag[2:] + } + return tag +} + +func resolvableTag(tag string) bool { + switch tag { + case "", strTag, boolTag, intTag, floatTag, nullTag, timestampTag: + return true + } + return false +} + +var yamlStyleFloat = regexp.MustCompile(`^[-+]?(\.[0-9]+|[0-9]+(\.[0-9]*)?)([eE][-+]?[0-9]+)?$`) + +func resolve(tag string, in string) (rtag string, out interface{}) { + tag = shortTag(tag) + if !resolvableTag(tag) { + return tag, in + } + + defer func() { + switch tag { + case "", rtag, strTag, binaryTag: + return + case floatTag: + if rtag == intTag { + switch v := out.(type) { + case int64: + rtag = floatTag + out = float64(v) + return + case int: + rtag = floatTag + out = float64(v) + return + } + } + } + failf("cannot decode %s `%s` as a %s", shortTag(rtag), in, shortTag(tag)) + }() + + // Any data is accepted as a !!str or !!binary. + // Otherwise, the prefix is enough of a hint about what it might be. + hint := byte('N') + if in != "" { + hint = resolveTable[in[0]] + } + if hint != 0 && tag != strTag && tag != binaryTag { + // Handle things we can lookup in a map. + if item, ok := resolveMap[in]; ok { + return item.tag, item.value + } + + // Base 60 floats are a bad idea, were dropped in YAML 1.2, and + // are purposefully unsupported here. They're still quoted on + // the way out for compatibility with other parser, though. 
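The deferred function at the top of resolve handles explicit tags: a scalar that first resolves as !!int is promoted when the tag asks for !!float, and any other mismatch reaches failf and comes back as a decode error. A sketch:

package main

import (
	"fmt"

	"go.yaml.in/yaml/v3"
)

func main() {
	var ok struct {
		Ratio float64 `yaml:"ratio"`
	}
	// "1" resolves as !!int first; the deferred check in resolve then
	// promotes it to float64 because the explicit tag asked for !!float.
	if err := yaml.Unmarshal([]byte("ratio: !!float 1\n"), &ok); err != nil {
		panic(err)
	}
	fmt.Println(ok.Ratio) // 1

	// A value that cannot satisfy its explicit tag fails instead.
	var bad map[string]int
	err := yaml.Unmarshal([]byte("n: !!int banana\n"), &bad)
	fmt.Println(err != nil) // true
}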
+
+		switch hint {
+		case 'M':
+			// We've already checked the map above.
+
+		case '.':
+			// Not in the map, so maybe a normal float.
+			floatv, err := strconv.ParseFloat(in, 64)
+			if err == nil {
+				return floatTag, floatv
+			}
+
+		case 'D', 'S':
+			// Int, float, or timestamp.
+			// Only try values as a timestamp if the value is unquoted or there's an explicit
+			// !!timestamp tag.
+			if tag == "" || tag == timestampTag {
+				t, ok := parseTimestamp(in)
+				if ok {
+					return timestampTag, t
+				}
+			}
+
+			plain := strings.Replace(in, "_", "", -1)
+			intv, err := strconv.ParseInt(plain, 0, 64)
+			if err == nil {
+				if intv == int64(int(intv)) {
+					return intTag, int(intv)
+				} else {
+					return intTag, intv
+				}
+			}
+			uintv, err := strconv.ParseUint(plain, 0, 64)
+			if err == nil {
+				return intTag, uintv
+			}
+			if yamlStyleFloat.MatchString(plain) {
+				floatv, err := strconv.ParseFloat(plain, 64)
+				if err == nil {
+					return floatTag, floatv
+				}
+			}
+			if strings.HasPrefix(plain, "0b") {
+				intv, err := strconv.ParseInt(plain[2:], 2, 64)
+				if err == nil {
+					if intv == int64(int(intv)) {
+						return intTag, int(intv)
+					} else {
+						return intTag, intv
+					}
+				}
+				uintv, err := strconv.ParseUint(plain[2:], 2, 64)
+				if err == nil {
+					return intTag, uintv
+				}
+			} else if strings.HasPrefix(plain, "-0b") {
+				intv, err := strconv.ParseInt("-"+plain[3:], 2, 64)
+				if err == nil {
+					if intv == int64(int(intv)) {
+						return intTag, int(intv)
+					} else {
+						return intTag, intv
+					}
+				}
+			}
+			// Octals as introduced in version 1.2 of the spec.
+			// Octals from the 1.1 spec, spelled as 0777, are still
+			// decoded by default in v3 as well for compatibility.
+			// May be dropped in v4 depending on how usage evolves.
+			if strings.HasPrefix(plain, "0o") {
+				intv, err := strconv.ParseInt(plain[2:], 8, 64)
+				if err == nil {
+					if intv == int64(int(intv)) {
+						return intTag, int(intv)
+					} else {
+						return intTag, intv
+					}
+				}
+				uintv, err := strconv.ParseUint(plain[2:], 8, 64)
+				if err == nil {
+					return intTag, uintv
+				}
+			} else if strings.HasPrefix(plain, "-0o") {
+				intv, err := strconv.ParseInt("-"+plain[3:], 8, 64)
+				if err == nil {
+					if intv == int64(int(intv)) {
+						return intTag, int(intv)
+					} else {
+						return intTag, intv
+					}
+				}
+			}
+		default:
+			panic("internal error: missing handler for resolver table: " + string(rune(hint)) + " (with " + in + ")")
+		}
+	}
+	return strTag, in
+}
+
+// encodeBase64 encodes s as base64 that is broken up into multiple lines
+// as appropriate for the resulting length.
+func encodeBase64(s string) string {
+	const lineLen = 70
+	encLen := base64.StdEncoding.EncodedLen(len(s))
+	lines := encLen/lineLen + 1
+	buf := make([]byte, encLen*2+lines)
+	in := buf[0:encLen]
+	out := buf[encLen:]
+	base64.StdEncoding.Encode(in, []byte(s))
+	k := 0
+	for i := 0; i < len(in); i += lineLen {
+		j := i + lineLen
+		if j > len(in) {
+			j = len(in)
+		}
+		k += copy(out[k:], in[i:j])
+		if lines > 1 {
+			out[k] = '\n'
+			k++
+		}
+	}
+	return string(out[:k])
+}
+
+// This is a subset of the formats allowed by the regular expression
+// defined at http://yaml.org/type/timestamp.html.
+var allowedTimestampFormats = []string{
+	"2006-1-2T15:4:5.999999999Z07:00", // RFC3339Nano with short date fields.
+	"2006-1-2t15:4:5.999999999Z07:00", // RFC3339Nano with short date fields and lower-case "t".
+	"2006-1-2 15:4:5.999999999",       // space separated with no time zone
+	"2006-1-2",                        // date only
+	// Notable exception: time.Parse cannot handle: "2001-12-14 21:59:43.10 -5"
+	// from the set of examples.
+}
+
+// parseTimestamp parses s as a timestamp string and
+// returns the timestamp and reports whether it succeeded.
+// Timestamp formats are defined at http://yaml.org/type/timestamp.html
+func parseTimestamp(s string) (time.Time, bool) {
+	// TODO write code to check all the formats supported by
+	// http://yaml.org/type/timestamp.html instead of using time.Parse.
+
+	// Quick check: all date formats start with YYYY-.
+	i := 0
+	for ; i < len(s); i++ {
+		if c := s[i]; c < '0' || c > '9' {
+			break
+		}
+	}
+	if i != 4 || i == len(s) || s[i] != '-' {
+		return time.Time{}, false
+	}
+	for _, format := range allowedTimestampFormats {
+		if t, err := time.Parse(format, s); err == nil {
+			return t, true
+		}
+	}
+	return time.Time{}, false
+}
diff --git a/vendor/go.yaml.in/yaml/v3/scannerc.go b/vendor/go.yaml.in/yaml/v3/scannerc.go
new file mode 100644
index 0000000000..30b1f08920
--- /dev/null
+++ b/vendor/go.yaml.in/yaml/v3/scannerc.go
@@ -0,0 +1,3040 @@
+//
+// Copyright (c) 2011-2019 Canonical Ltd
+// Copyright (c) 2006-2010 Kirill Simonov
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal in
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+// of the Software, and to permit persons to whom the Software is furnished to do
+// so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+
+package yaml
+
+import (
+	"bytes"
+	"fmt"
+)
+
+// Introduction
+// ************
+//
+// The following notes assume that you are familiar with the YAML specification
+// (http://yaml.org/spec/1.2/spec.html). We mostly follow it, although in
+// some cases we are less restrictive than it requires.
+//
+// The process of transforming a YAML stream into a sequence of events is
+// divided into two steps: Scanning and Parsing.
+//
+// The Scanner transforms the input stream into a sequence of tokens, while the
+// Parser transforms the sequence of tokens produced by the Scanner into a
+// sequence of parsing events.
+//
+// The Scanner is rather clever and complicated. The Parser, on the contrary,
+// is a straightforward implementation of a recursive-descent parser (or an
+// LL(1) parser, as it is usually called).
+//
+// Actually there are two aspects of Scanning that might be called "clever";
+// the rest is quite straightforward. These are "block collection start" and
+// "simple keys". Both are explained below in detail.
+//
+// Here the Scanning step is explained and implemented. We start with the list
+// of all the tokens produced by the Scanner together with short descriptions.
+//
+// Now, tokens:
+//
+//      STREAM-START(encoding)          # The stream start.
+//      STREAM-END                      # The stream end.
+//      VERSION-DIRECTIVE(major,minor)  # The '%YAML' directive.
+//      TAG-DIRECTIVE(handle,prefix)    # The '%TAG' directive.
+//      DOCUMENT-START                  # '---'
+//      DOCUMENT-END                    # '...'
+//      BLOCK-SEQUENCE-START            # Indentation increase denoting a block
+//      BLOCK-MAPPING-START             # sequence or a block mapping.
+//      BLOCK-END                       # Indentation decrease.
+//      FLOW-SEQUENCE-START             # '['
+//      FLOW-SEQUENCE-END               # ']'
+//      FLOW-MAPPING-START              # '{'
+//      FLOW-MAPPING-END                # '}'
+//      BLOCK-ENTRY                     # '-'
+//      FLOW-ENTRY                      # ','
+//      KEY                             # '?' or nothing (simple keys).
+//      VALUE                           # ':'
+//      ALIAS(anchor)                   # '*anchor'
+//      ANCHOR(anchor)                  # '&anchor'
+//      TAG(handle,suffix)              # '!handle!suffix'
+//      SCALAR(value,style)             # A scalar.
+//
+// The following two tokens are "virtual" tokens denoting the beginning and the
+// end of the stream:
+//
+//      STREAM-START(encoding)
+//      STREAM-END
+//
+// We pass the information about the input stream encoding with the
+// STREAM-START token.
+//
+// The next two tokens are responsible for directives:
+//
+//      VERSION-DIRECTIVE(major,minor)
+//      TAG-DIRECTIVE(handle,prefix)
+//
+// Example:
+//
+//      %YAML   1.1
+//      %TAG    !       !foo
+//      %TAG    !yaml!  tag:yaml.org,2002:
+//      ---
+//
+// The corresponding sequence of tokens:
+//
+//      STREAM-START(utf-8)
+//      VERSION-DIRECTIVE(1,1)
+//      TAG-DIRECTIVE("!","!foo")
+//      TAG-DIRECTIVE("!yaml","tag:yaml.org,2002:")
+//      DOCUMENT-START
+//      STREAM-END
+//
+// Note that the VERSION-DIRECTIVE and TAG-DIRECTIVE tokens occupy a whole
+// line.
+//
+// The document start and end indicators are represented by:
+//
+//      DOCUMENT-START
+//      DOCUMENT-END
+//
+// Note that if a YAML stream contains an implicit document (without '---'
+// and '...' indicators), no DOCUMENT-START and DOCUMENT-END tokens will be
+// produced.
+//
+// In the following examples, we present whole documents together with the
+// produced tokens.
+//
+// 1. An implicit document:
+//
+//      'a scalar'
+//
+// Tokens:
+//
+//      STREAM-START(utf-8)
+//      SCALAR("a scalar",single-quoted)
+//      STREAM-END
+//
+// 2. An explicit document:
+//
+//      ---
+//      'a scalar'
+//      ...
+//
+// Tokens:
+//
+//      STREAM-START(utf-8)
+//      DOCUMENT-START
+//      SCALAR("a scalar",single-quoted)
+//      DOCUMENT-END
+//      STREAM-END
+//
+// 3. Several documents in a stream:
+//
+//      'a scalar'
+//      ---
+//      'another scalar'
+//      ---
+//      'yet another scalar'
+//
+// Tokens:
+//
+//      STREAM-START(utf-8)
+//      SCALAR("a scalar",single-quoted)
+//      DOCUMENT-START
+//      SCALAR("another scalar",single-quoted)
+//      DOCUMENT-START
+//      SCALAR("yet another scalar",single-quoted)
+//      STREAM-END
+//
+// We have already introduced the SCALAR token above. The following tokens are
+// used to describe aliases, anchors, tags, and scalars:
+//
+//      ALIAS(anchor)
+//      ANCHOR(anchor)
+//      TAG(handle,suffix)
+//      SCALAR(value,style)
+//
+// The following series of examples illustrates the usage of these tokens:
+//
+// 1. A recursive sequence:
+//
+//      &A [ *A ]
+//
+// Tokens:
+//
+//      STREAM-START(utf-8)
+//      ANCHOR("A")
+//      FLOW-SEQUENCE-START
+//      ALIAS("A")
+//      FLOW-SEQUENCE-END
+//      STREAM-END
+//
+// 2. A tagged scalar:
+//
+//      !!float "3.14"  # A good approximation.
+//
+// Tokens:
+//
+//      STREAM-START(utf-8)
+//      TAG("!!","float")
+//      SCALAR("3.14",double-quoted)
+//      STREAM-END
+//
+// 3. Various scalar styles:
+//
+//      --- # Implicit empty plain scalars do not produce tokens.
+//      --- a plain scalar
+//      --- 'a single-quoted scalar'
+//      --- "a double-quoted scalar"
+//      --- |-
+//        a literal scalar
+//      --- >-
+//        a folded
+//        scalar
+//
+// Tokens:
+//
+//      STREAM-START(utf-8)
+//      DOCUMENT-START
+//      DOCUMENT-START
+//      SCALAR("a plain scalar",plain)
+//      DOCUMENT-START
+//      SCALAR("a single-quoted scalar",single-quoted)
+//      DOCUMENT-START
+//      SCALAR("a double-quoted scalar",double-quoted)
+//      DOCUMENT-START
+//      SCALAR("a literal scalar",literal)
+//      DOCUMENT-START
+//      SCALAR("a folded scalar",folded)
+//      STREAM-END
+//
+// Now it's time to review collection-related tokens. We will start with
+// flow collections:
+//
+//      FLOW-SEQUENCE-START
+//      FLOW-SEQUENCE-END
+//      FLOW-MAPPING-START
+//      FLOW-MAPPING-END
+//      FLOW-ENTRY
+//      KEY
+//      VALUE
+//
+// The tokens FLOW-SEQUENCE-START, FLOW-SEQUENCE-END, FLOW-MAPPING-START, and
+// FLOW-MAPPING-END represent the indicators '[', ']', '{', and '}'
+// respectively. FLOW-ENTRY represents the ',' indicator. Finally, the
+// indicators '?' and ':', which are used for denoting mapping keys and values,
+// are represented by the KEY and VALUE tokens.
+//
+// The following examples show flow collections:
+//
+// 1. A flow sequence:
+//
+//      [item 1, item 2, item 3]
+//
+// Tokens:
+//
+//      STREAM-START(utf-8)
+//      FLOW-SEQUENCE-START
+//      SCALAR("item 1",plain)
+//      FLOW-ENTRY
+//      SCALAR("item 2",plain)
+//      FLOW-ENTRY
+//      SCALAR("item 3",plain)
+//      FLOW-SEQUENCE-END
+//      STREAM-END
+//
+// 2. A flow mapping:
+//
+//      {
+//          a simple key: a value,  # Note that the KEY token is produced.
+//          ? a complex key: another value,
+//      }
+//
+// Tokens:
+//
+//      STREAM-START(utf-8)
+//      FLOW-MAPPING-START
+//      KEY
+//      SCALAR("a simple key",plain)
+//      VALUE
+//      SCALAR("a value",plain)
+//      FLOW-ENTRY
+//      KEY
+//      SCALAR("a complex key",plain)
+//      VALUE
+//      SCALAR("another value",plain)
+//      FLOW-ENTRY
+//      FLOW-MAPPING-END
+//      STREAM-END
+//
+// A simple key is a key which is not denoted by the '?' indicator. Note that
+// the Scanner still produces the KEY token whenever it encounters a simple key.
+//
+// For scanning block collections, the following tokens are used (note that we
+// repeat KEY and VALUE here):
+//
+//      BLOCK-SEQUENCE-START
+//      BLOCK-MAPPING-START
+//      BLOCK-END
+//      BLOCK-ENTRY
+//      KEY
+//      VALUE
+//
+// The tokens BLOCK-SEQUENCE-START and BLOCK-MAPPING-START denote the
+// indentation increase that precedes a block collection (cf. the INDENT token
+// in Python). The token BLOCK-END denotes the indentation decrease that ends a
+// block collection (cf. the DEDENT token in Python). However, YAML has some
+// syntax peculiarities that make detecting these tokens more complex.
+//
+// The tokens BLOCK-ENTRY, KEY, and VALUE are used to represent the indicators
+// '-', '?', and ':' respectively.
+//
+// The following examples show how the tokens BLOCK-SEQUENCE-START,
+// BLOCK-MAPPING-START, and BLOCK-END are emitted by the Scanner:
+//
+// 1. Block sequences:
+//
+//      - item 1
+//      - item 2
+//      -
+//        - item 3.1
+//        - item 3.2
+//      -
+//        key 1: value 1
+//        key 2: value 2
+//
+// Tokens:
+//
+//      STREAM-START(utf-8)
+//      BLOCK-SEQUENCE-START
+//      BLOCK-ENTRY
+//      SCALAR("item 1",plain)
+//      BLOCK-ENTRY
+//      SCALAR("item 2",plain)
+//      BLOCK-ENTRY
+//      BLOCK-SEQUENCE-START
+//      BLOCK-ENTRY
+//      SCALAR("item 3.1",plain)
+//      BLOCK-ENTRY
+//      SCALAR("item 3.2",plain)
+//      BLOCK-END
+//      BLOCK-ENTRY
+//      BLOCK-MAPPING-START
+//      KEY
+//      SCALAR("key 1",plain)
+//      VALUE
+//      SCALAR("value 1",plain)
+//      KEY
+//      SCALAR("key 2",plain)
+//      VALUE
+//      SCALAR("value 2",plain)
+//      BLOCK-END
+//      BLOCK-END
+//      STREAM-END
+//
+// 2. Block mappings:
+//
+//      a simple key: a value   # The KEY token is produced here.
+//      ? a complex key
+//      : another value
+//      a mapping:
+//        key 1: value 1
+//        key 2: value 2
+//      a sequence:
+//        - item 1
+//        - item 2
+//
+// Tokens:
+//
+//      STREAM-START(utf-8)
+//      BLOCK-MAPPING-START
+//      KEY
+//      SCALAR("a simple key",plain)
+//      VALUE
+//      SCALAR("a value",plain)
+//      KEY
+//      SCALAR("a complex key",plain)
+//      VALUE
+//      SCALAR("another value",plain)
+//      KEY
+//      SCALAR("a mapping",plain)
+//      VALUE
+//      BLOCK-MAPPING-START
+//      KEY
+//      SCALAR("key 1",plain)
+//      VALUE
+//      SCALAR("value 1",plain)
+//      KEY
+//      SCALAR("key 2",plain)
+//      VALUE
+//      SCALAR("value 2",plain)
+//      BLOCK-END
+//      KEY
+//      SCALAR("a sequence",plain)
+//      VALUE
+//      BLOCK-SEQUENCE-START
+//      BLOCK-ENTRY
+//      SCALAR("item 1",plain)
+//      BLOCK-ENTRY
+//      SCALAR("item 2",plain)
+//      BLOCK-END
+//      BLOCK-END
+//      STREAM-END
+//
+// YAML does not always require a new block collection to start on a new
+// line. If the current line contains only '-', '?', and ':' indicators, a new
+// block collection may start at the current line. The following examples
+// illustrate this case:
+//
+// 1. Collections in a sequence:
+//
+//      - - item 1
+//        - item 2
+//      - key 1: value 1
+//        key 2: value 2
+//      - ? complex key
+//        : complex value
+//
+// Tokens:
+//
+//      STREAM-START(utf-8)
+//      BLOCK-SEQUENCE-START
+//      BLOCK-ENTRY
+//      BLOCK-SEQUENCE-START
+//      BLOCK-ENTRY
+//      SCALAR("item 1",plain)
+//      BLOCK-ENTRY
+//      SCALAR("item 2",plain)
+//      BLOCK-END
+//      BLOCK-ENTRY
+//      BLOCK-MAPPING-START
+//      KEY
+//      SCALAR("key 1",plain)
+//      VALUE
+//      SCALAR("value 1",plain)
+//      KEY
+//      SCALAR("key 2",plain)
+//      VALUE
+//      SCALAR("value 2",plain)
+//      BLOCK-END
+//      BLOCK-ENTRY
+//      BLOCK-MAPPING-START
+//      KEY
+//      SCALAR("complex key")
+//      VALUE
+//      SCALAR("complex value")
+//      BLOCK-END
+//      BLOCK-END
+//      STREAM-END
+//
+// 2. Collections in a mapping:
+//
+//      ? a sequence
+//      : - item 1
+//        - item 2
+//      ? a mapping
+//      : key 1: value 1
+//        key 2: value 2
+//
+// Tokens:
+//
+//      STREAM-START(utf-8)
+//      BLOCK-MAPPING-START
+//      KEY
+//      SCALAR("a sequence",plain)
+//      VALUE
+//      BLOCK-SEQUENCE-START
+//      BLOCK-ENTRY
+//      SCALAR("item 1",plain)
+//      BLOCK-ENTRY
+//      SCALAR("item 2",plain)
+//      BLOCK-END
+//      KEY
+//      SCALAR("a mapping",plain)
+//      VALUE
+//      BLOCK-MAPPING-START
+//      KEY
+//      SCALAR("key 1",plain)
+//      VALUE
+//      SCALAR("value 1",plain)
+//      KEY
+//      SCALAR("key 2",plain)
+//      VALUE
+//      SCALAR("value 2",plain)
+//      BLOCK-END
+//      BLOCK-END
+//      STREAM-END
+//
+// YAML also permits non-indented sequences if they are included in a block
+// mapping. In this case, the token BLOCK-SEQUENCE-START is not produced:
+//
+//      key:
+//      - item 1    # BLOCK-SEQUENCE-START is NOT produced here.
+// - item 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-MAPPING-START +// KEY +// SCALAR("key",plain) +// VALUE +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// + +// Ensure that the buffer contains the required number of characters. +// Return true on success, false on failure (reader error or memory error). +func cache(parser *yaml_parser_t, length int) bool { + // [Go] This was inlined: !cache(A, B) -> unread < B && !update(A, B) + return parser.unread >= length || yaml_parser_update_buffer(parser, length) +} + +// Advance the buffer pointer. +func skip(parser *yaml_parser_t) { + if !is_blank(parser.buffer, parser.buffer_pos) { + parser.newlines = 0 + } + parser.mark.index++ + parser.mark.column++ + parser.unread-- + parser.buffer_pos += width(parser.buffer[parser.buffer_pos]) +} + +func skip_line(parser *yaml_parser_t) { + if is_crlf(parser.buffer, parser.buffer_pos) { + parser.mark.index += 2 + parser.mark.column = 0 + parser.mark.line++ + parser.unread -= 2 + parser.buffer_pos += 2 + parser.newlines++ + } else if is_break(parser.buffer, parser.buffer_pos) { + parser.mark.index++ + parser.mark.column = 0 + parser.mark.line++ + parser.unread-- + parser.buffer_pos += width(parser.buffer[parser.buffer_pos]) + parser.newlines++ + } +} + +// Copy a character to a string buffer and advance pointers. +func read(parser *yaml_parser_t, s []byte) []byte { + if !is_blank(parser.buffer, parser.buffer_pos) { + parser.newlines = 0 + } + w := width(parser.buffer[parser.buffer_pos]) + if w == 0 { + panic("invalid character sequence") + } + if len(s) == 0 { + s = make([]byte, 0, 32) + } + if w == 1 && len(s)+w <= cap(s) { + s = s[:len(s)+1] + s[len(s)-1] = parser.buffer[parser.buffer_pos] + parser.buffer_pos++ + } else { + s = append(s, parser.buffer[parser.buffer_pos:parser.buffer_pos+w]...) + parser.buffer_pos += w + } + parser.mark.index++ + parser.mark.column++ + parser.unread-- + return s +} + +// Copy a line break character to a string buffer and advance pointers. +func read_line(parser *yaml_parser_t, s []byte) []byte { + buf := parser.buffer + pos := parser.buffer_pos + switch { + case buf[pos] == '\r' && buf[pos+1] == '\n': + // CR LF . LF + s = append(s, '\n') + parser.buffer_pos += 2 + parser.mark.index++ + parser.unread-- + case buf[pos] == '\r' || buf[pos] == '\n': + // CR|LF . LF + s = append(s, '\n') + parser.buffer_pos += 1 + case buf[pos] == '\xC2' && buf[pos+1] == '\x85': + // NEL . LF + s = append(s, '\n') + parser.buffer_pos += 2 + case buf[pos] == '\xE2' && buf[pos+1] == '\x80' && (buf[pos+2] == '\xA8' || buf[pos+2] == '\xA9'): + // LS|PS . LS|PS + s = append(s, buf[parser.buffer_pos:pos+3]...) + parser.buffer_pos += 3 + default: + return s + } + parser.mark.index++ + parser.mark.column = 0 + parser.mark.line++ + parser.unread-- + parser.newlines++ + return s +} + +// Get the next token. +func yaml_parser_scan(parser *yaml_parser_t, token *yaml_token_t) bool { + // Erase the token object. + *token = yaml_token_t{} // [Go] Is this necessary? + + // No tokens after STREAM-END or error. + if parser.stream_end_produced || parser.error != yaml_NO_ERROR { + return true + } + + // Ensure that the tokens queue contains enough tokens. + if !parser.token_available { + if !yaml_parser_fetch_more_tokens(parser) { + return false + } + } + + // Fetch the next token from the queue. 
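+	// (parser.tokens is the token queue; tokens_head indexes its next unread
+	// entry, and the yaml_parser_fetch_more_tokens call above guarantees the
+	// queue is non-empty at this point.)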
+ *token = parser.tokens[parser.tokens_head] + parser.tokens_head++ + parser.tokens_parsed++ + parser.token_available = false + + if token.typ == yaml_STREAM_END_TOKEN { + parser.stream_end_produced = true + } + return true +} + +// Set the scanner error and return false. +func yaml_parser_set_scanner_error(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string) bool { + parser.error = yaml_SCANNER_ERROR + parser.context = context + parser.context_mark = context_mark + parser.problem = problem + parser.problem_mark = parser.mark + return false +} + +func yaml_parser_set_scanner_tag_error(parser *yaml_parser_t, directive bool, context_mark yaml_mark_t, problem string) bool { + context := "while parsing a tag" + if directive { + context = "while parsing a %TAG directive" + } + return yaml_parser_set_scanner_error(parser, context, context_mark, problem) +} + +func trace(args ...interface{}) func() { + pargs := append([]interface{}{"+++"}, args...) + fmt.Println(pargs...) + pargs = append([]interface{}{"---"}, args...) + return func() { fmt.Println(pargs...) } +} + +// Ensure that the tokens queue contains at least one token which can be +// returned to the Parser. +func yaml_parser_fetch_more_tokens(parser *yaml_parser_t) bool { + // While we need more tokens to fetch, do it. + for { + // [Go] The comment parsing logic requires a lookahead of two tokens + // so that foot comments may be parsed in time of associating them + // with the tokens that are parsed before them, and also for line + // comments to be transformed into head comments in some edge cases. + if parser.tokens_head < len(parser.tokens)-2 { + // If a potential simple key is at the head position, we need to fetch + // the next token to disambiguate it. + head_tok_idx, ok := parser.simple_keys_by_tok[parser.tokens_parsed] + if !ok { + break + } else if valid, ok := yaml_simple_key_is_valid(parser, &parser.simple_keys[head_tok_idx]); !ok { + return false + } else if !valid { + break + } + } + // Fetch the next token. + if !yaml_parser_fetch_next_token(parser) { + return false + } + } + + parser.token_available = true + return true +} + +// The dispatcher for token fetchers. +func yaml_parser_fetch_next_token(parser *yaml_parser_t) (ok bool) { + // Ensure that the buffer is initialized. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // Check if we just started scanning. Fetch STREAM-START then. + if !parser.stream_start_produced { + return yaml_parser_fetch_stream_start(parser) + } + + scan_mark := parser.mark + + // Eat whitespaces and comments until we reach the next token. + if !yaml_parser_scan_to_next_token(parser) { + return false + } + + // [Go] While unrolling indents, transform the head comments of prior + // indentation levels observed after scan_start into foot comments at + // the respective indexes. + + // Check the indentation level against the current column. + if !yaml_parser_unroll_indent(parser, parser.mark.column, scan_mark) { + return false + } + + // Ensure that the buffer contains at least 4 characters. 4 is the length + // of the longest indicators ('--- ' and '... '). + if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { + return false + } + + // Is it the end of the stream? + if is_z(parser.buffer, parser.buffer_pos) { + return yaml_parser_fetch_stream_end(parser) + } + + // Is it a directive? 
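+	// Directives such as `%YAML 1.1` and `%TAG ! !foo` are recognized only
+	// when the '%' sits at column 0, i.e. at the start of a line.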
+ if parser.mark.column == 0 && parser.buffer[parser.buffer_pos] == '%' { + return yaml_parser_fetch_directive(parser) + } + + buf := parser.buffer + pos := parser.buffer_pos + + // Is it the document start indicator? + if parser.mark.column == 0 && buf[pos] == '-' && buf[pos+1] == '-' && buf[pos+2] == '-' && is_blankz(buf, pos+3) { + return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_START_TOKEN) + } + + // Is it the document end indicator? + if parser.mark.column == 0 && buf[pos] == '.' && buf[pos+1] == '.' && buf[pos+2] == '.' && is_blankz(buf, pos+3) { + return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_END_TOKEN) + } + + comment_mark := parser.mark + if len(parser.tokens) > 0 && (parser.flow_level == 0 && buf[pos] == ':' || parser.flow_level > 0 && buf[pos] == ',') { + // Associate any following comments with the prior token. + comment_mark = parser.tokens[len(parser.tokens)-1].start_mark + } + defer func() { + if !ok { + return + } + if len(parser.tokens) > 0 && parser.tokens[len(parser.tokens)-1].typ == yaml_BLOCK_ENTRY_TOKEN { + // Sequence indicators alone have no line comments. It becomes + // a head comment for whatever follows. + return + } + if !yaml_parser_scan_line_comment(parser, comment_mark) { + ok = false + return + } + }() + + // Is it the flow sequence start indicator? + if buf[pos] == '[' { + return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_SEQUENCE_START_TOKEN) + } + + // Is it the flow mapping start indicator? + if parser.buffer[parser.buffer_pos] == '{' { + return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_MAPPING_START_TOKEN) + } + + // Is it the flow sequence end indicator? + if parser.buffer[parser.buffer_pos] == ']' { + return yaml_parser_fetch_flow_collection_end(parser, + yaml_FLOW_SEQUENCE_END_TOKEN) + } + + // Is it the flow mapping end indicator? + if parser.buffer[parser.buffer_pos] == '}' { + return yaml_parser_fetch_flow_collection_end(parser, + yaml_FLOW_MAPPING_END_TOKEN) + } + + // Is it the flow entry indicator? + if parser.buffer[parser.buffer_pos] == ',' { + return yaml_parser_fetch_flow_entry(parser) + } + + // Is it the block entry indicator? + if parser.buffer[parser.buffer_pos] == '-' && is_blankz(parser.buffer, parser.buffer_pos+1) { + return yaml_parser_fetch_block_entry(parser) + } + + // Is it the key indicator? + if parser.buffer[parser.buffer_pos] == '?' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) { + return yaml_parser_fetch_key(parser) + } + + // Is it the value indicator? + if parser.buffer[parser.buffer_pos] == ':' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) { + return yaml_parser_fetch_value(parser) + } + + // Is it an alias? + if parser.buffer[parser.buffer_pos] == '*' { + return yaml_parser_fetch_anchor(parser, yaml_ALIAS_TOKEN) + } + + // Is it an anchor? + if parser.buffer[parser.buffer_pos] == '&' { + return yaml_parser_fetch_anchor(parser, yaml_ANCHOR_TOKEN) + } + + // Is it a tag? + if parser.buffer[parser.buffer_pos] == '!' { + return yaml_parser_fetch_tag(parser) + } + + // Is it a literal scalar? + if parser.buffer[parser.buffer_pos] == '|' && parser.flow_level == 0 { + return yaml_parser_fetch_block_scalar(parser, true) + } + + // Is it a folded scalar? + if parser.buffer[parser.buffer_pos] == '>' && parser.flow_level == 0 { + return yaml_parser_fetch_block_scalar(parser, false) + } + + // Is it a single-quoted scalar? 
+ if parser.buffer[parser.buffer_pos] == '\'' { + return yaml_parser_fetch_flow_scalar(parser, true) + } + + // Is it a double-quoted scalar? + if parser.buffer[parser.buffer_pos] == '"' { + return yaml_parser_fetch_flow_scalar(parser, false) + } + + // Is it a plain scalar? + // + // A plain scalar may start with any non-blank characters except + // + // '-', '?', ':', ',', '[', ']', '{', '}', + // '#', '&', '*', '!', '|', '>', '\'', '\"', + // '%', '@', '`'. + // + // In the block context (and, for the '-' indicator, in the flow context + // too), it may also start with the characters + // + // '-', '?', ':' + // + // if it is followed by a non-space character. + // + // The last rule is more restrictive than the specification requires. + // [Go] TODO Make this logic more reasonable. + //switch parser.buffer[parser.buffer_pos] { + //case '-', '?', ':', ',', '?', '-', ',', ':', ']', '[', '}', '{', '&', '#', '!', '*', '>', '|', '"', '\'', '@', '%', '-', '`': + //} + if !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '-' || + parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':' || + parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '[' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' || + parser.buffer[parser.buffer_pos] == '}' || parser.buffer[parser.buffer_pos] == '#' || + parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '*' || + parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '|' || + parser.buffer[parser.buffer_pos] == '>' || parser.buffer[parser.buffer_pos] == '\'' || + parser.buffer[parser.buffer_pos] == '"' || parser.buffer[parser.buffer_pos] == '%' || + parser.buffer[parser.buffer_pos] == '@' || parser.buffer[parser.buffer_pos] == '`') || + (parser.buffer[parser.buffer_pos] == '-' && !is_blank(parser.buffer, parser.buffer_pos+1)) || + (parser.flow_level == 0 && + (parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':') && + !is_blankz(parser.buffer, parser.buffer_pos+1)) { + return yaml_parser_fetch_plain_scalar(parser) + } + + // If we don't determine the token type so far, it is an error. + return yaml_parser_set_scanner_error(parser, + "while scanning for the next token", parser.mark, + "found character that cannot start any token") +} + +func yaml_simple_key_is_valid(parser *yaml_parser_t, simple_key *yaml_simple_key_t) (valid, ok bool) { + if !simple_key.possible { + return false, true + } + + // The 1.2 specification says: + // + // "If the ? indicator is omitted, parsing needs to see past the + // implicit key to recognize it as such. To limit the amount of + // lookahead required, the “:” indicator must appear at most 1024 + // Unicode characters beyond the start of the key. In addition, the key + // is restricted to a single line." + // + if simple_key.mark.line < parser.mark.line || simple_key.mark.index+1024 < parser.mark.index { + // Check if the potential simple key to be removed is required. + if simple_key.required { + return false, yaml_parser_set_scanner_error(parser, + "while scanning a simple key", simple_key.mark, + "could not find expected ':'") + } + simple_key.possible = false + return false, true + } + return true, true +} + +// Check if a simple key may start at the current position and add it if +// needed. 
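+//
+// For example, in `foo: bar` the scalar `foo` is a simple key: it is not
+// preceded by a '?' indicator, so only its position is recorded here, and
+// the KEY token is inserted retroactively once the following ':' is seen
+// (see yaml_parser_fetch_value below).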
+func yaml_parser_save_simple_key(parser *yaml_parser_t) bool {
+	// A simple key is required at the current position if the scanner is in
+	// the block context and the current column coincides with the indentation
+	// level.
+
+	required := parser.flow_level == 0 && parser.indent == parser.mark.column
+
+	//
+	// If the current position may start a simple key, save it.
+	//
+	if parser.simple_key_allowed {
+		simple_key := yaml_simple_key_t{
+			possible:     true,
+			required:     required,
+			token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head),
+			mark:         parser.mark,
+		}
+
+		if !yaml_parser_remove_simple_key(parser) {
+			return false
+		}
+		parser.simple_keys[len(parser.simple_keys)-1] = simple_key
+		parser.simple_keys_by_tok[simple_key.token_number] = len(parser.simple_keys) - 1
+	}
+	return true
+}
+
+// Remove a potential simple key at the current flow level.
+func yaml_parser_remove_simple_key(parser *yaml_parser_t) bool {
+	i := len(parser.simple_keys) - 1
+	if parser.simple_keys[i].possible {
+		// If the key is required, it is an error.
+		if parser.simple_keys[i].required {
+			return yaml_parser_set_scanner_error(parser,
+				"while scanning a simple key", parser.simple_keys[i].mark,
+				"could not find expected ':'")
+		}
+		// Remove the key from the stack.
+		parser.simple_keys[i].possible = false
+		delete(parser.simple_keys_by_tok, parser.simple_keys[i].token_number)
+	}
+	return true
+}
+
+// max_flow_level limits the flow_level
+const max_flow_level = 10000
+
+// Increase the flow level and resize the simple key list if needed.
+func yaml_parser_increase_flow_level(parser *yaml_parser_t) bool {
+	// Reset the simple key on the next level.
+	parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{
+		possible:     false,
+		required:     false,
+		token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head),
+		mark:         parser.mark,
+	})
+
+	// Increase the flow level.
+	parser.flow_level++
+	if parser.flow_level > max_flow_level {
+		return yaml_parser_set_scanner_error(parser,
+			"while increasing flow level", parser.simple_keys[len(parser.simple_keys)-1].mark,
+			fmt.Sprintf("exceeded max depth of %d", max_flow_level))
+	}
+	return true
+}
+
+// Decrease the flow level.
+func yaml_parser_decrease_flow_level(parser *yaml_parser_t) bool {
+	if parser.flow_level > 0 {
+		parser.flow_level--
+		last := len(parser.simple_keys) - 1
+		delete(parser.simple_keys_by_tok, parser.simple_keys[last].token_number)
+		parser.simple_keys = parser.simple_keys[:last]
+	}
+	return true
+}
+
+// max_indents limits the indents stack size
+const max_indents = 10000
+
+// Push the current indentation level to the stack and set the new level if
+// the current column is greater than the indentation level. In this case,
+// append or insert the specified token into the token queue.
+func yaml_parser_roll_indent(parser *yaml_parser_t, column, number int, typ yaml_token_type_t, mark yaml_mark_t) bool {
+	// In the flow context, do nothing.
+	if parser.flow_level > 0 {
+		return true
+	}
+
+	if parser.indent < column {
+		// Push the current indentation level to the stack and set the new
+		// indentation level.
+		parser.indents = append(parser.indents, parser.indent)
+		parser.indent = column
+		if len(parser.indents) > max_indents {
+			return yaml_parser_set_scanner_error(parser,
+				"while increasing indent level", parser.simple_keys[len(parser.simple_keys)-1].mark,
+				fmt.Sprintf("exceeded max depth of %d", max_indents))
+		}
+
+		// Create a token and insert it into the queue.
+ token := yaml_token_t{ + typ: typ, + start_mark: mark, + end_mark: mark, + } + if number > -1 { + number -= parser.tokens_parsed + } + yaml_insert_token(parser, number, &token) + } + return true +} + +// Pop indentation levels from the indents stack until the current level +// becomes less or equal to the column. For each indentation level, append +// the BLOCK-END token. +func yaml_parser_unroll_indent(parser *yaml_parser_t, column int, scan_mark yaml_mark_t) bool { + // In the flow context, do nothing. + if parser.flow_level > 0 { + return true + } + + block_mark := scan_mark + block_mark.index-- + + // Loop through the indentation levels in the stack. + for parser.indent > column { + + // [Go] Reposition the end token before potential following + // foot comments of parent blocks. For that, search + // backwards for recent comments that were at the same + // indent as the block that is ending now. + stop_index := block_mark.index + for i := len(parser.comments) - 1; i >= 0; i-- { + comment := &parser.comments[i] + + if comment.end_mark.index < stop_index { + // Don't go back beyond the start of the comment/whitespace scan, unless column < 0. + // If requested indent column is < 0, then the document is over and everything else + // is a foot anyway. + break + } + if comment.start_mark.column == parser.indent+1 { + // This is a good match. But maybe there's a former comment + // at that same indent level, so keep searching. + block_mark = comment.start_mark + } + + // While the end of the former comment matches with + // the start of the following one, we know there's + // nothing in between and scanning is still safe. + stop_index = comment.scan_mark.index + } + + // Create a token and append it to the queue. + token := yaml_token_t{ + typ: yaml_BLOCK_END_TOKEN, + start_mark: block_mark, + end_mark: block_mark, + } + yaml_insert_token(parser, -1, &token) + + // Pop the indentation level. + parser.indent = parser.indents[len(parser.indents)-1] + parser.indents = parser.indents[:len(parser.indents)-1] + } + return true +} + +// Initialize the scanner and produce the STREAM-START token. +func yaml_parser_fetch_stream_start(parser *yaml_parser_t) bool { + + // Set the initial indentation. + parser.indent = -1 + + // Initialize the simple key stack. + parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{}) + + parser.simple_keys_by_tok = make(map[int]int) + + // A simple key is allowed at the beginning of the stream. + parser.simple_key_allowed = true + + // We have started. + parser.stream_start_produced = true + + // Create the STREAM-START token and append it to the queue. + token := yaml_token_t{ + typ: yaml_STREAM_START_TOKEN, + start_mark: parser.mark, + end_mark: parser.mark, + encoding: parser.encoding, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the STREAM-END token and shut down the scanner. +func yaml_parser_fetch_stream_end(parser *yaml_parser_t) bool { + + // Force new line. + if parser.mark.column != 0 { + parser.mark.column = 0 + parser.mark.line++ + } + + // Reset the indentation level. + if !yaml_parser_unroll_indent(parser, -1, parser.mark) { + return false + } + + // Reset simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + parser.simple_key_allowed = false + + // Create the STREAM-END token and append it to the queue. 
+	token := yaml_token_t{
+		typ:        yaml_STREAM_END_TOKEN,
+		start_mark: parser.mark,
+		end_mark:   parser.mark,
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce a VERSION-DIRECTIVE or TAG-DIRECTIVE token.
+func yaml_parser_fetch_directive(parser *yaml_parser_t) bool {
+	// Reset the indentation level.
+	if !yaml_parser_unroll_indent(parser, -1, parser.mark) {
+		return false
+	}
+
+	// Reset simple keys.
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	parser.simple_key_allowed = false
+
+	// Create the YAML-DIRECTIVE or TAG-DIRECTIVE token.
+	token := yaml_token_t{}
+	if !yaml_parser_scan_directive(parser, &token) {
+		return false
+	}
+	// Append the token to the queue.
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the DOCUMENT-START or DOCUMENT-END token.
+func yaml_parser_fetch_document_indicator(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+	// Reset the indentation level.
+	if !yaml_parser_unroll_indent(parser, -1, parser.mark) {
+		return false
+	}
+
+	// Reset simple keys.
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	parser.simple_key_allowed = false
+
+	// Consume the token.
+	start_mark := parser.mark
+
+	skip(parser)
+	skip(parser)
+	skip(parser)
+
+	end_mark := parser.mark
+
+	// Create the DOCUMENT-START or DOCUMENT-END token.
+	token := yaml_token_t{
+		typ:        typ,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+	}
+	// Append the token to the queue.
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the FLOW-SEQUENCE-START or FLOW-MAPPING-START token.
+func yaml_parser_fetch_flow_collection_start(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+
+	// The indicators '[' and '{' may start a simple key.
+	if !yaml_parser_save_simple_key(parser) {
+		return false
+	}
+
+	// Increase the flow level.
+	if !yaml_parser_increase_flow_level(parser) {
+		return false
+	}
+
+	// A simple key may follow the indicators '[' and '{'.
+	parser.simple_key_allowed = true
+
+	// Consume the token.
+	start_mark := parser.mark
+	skip(parser)
+	end_mark := parser.mark
+
+	// Create the FLOW-SEQUENCE-START or FLOW-MAPPING-START token.
+	token := yaml_token_t{
+		typ:        typ,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+	}
+	// Append the token to the queue.
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the FLOW-SEQUENCE-END or FLOW-MAPPING-END token.
+func yaml_parser_fetch_flow_collection_end(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+	// Reset any potential simple key on the current flow level.
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	// Decrease the flow level.
+	if !yaml_parser_decrease_flow_level(parser) {
+		return false
+	}
+
+	// No simple keys after the indicators ']' and '}'.
+	parser.simple_key_allowed = false
+
+	// Consume the token.
+
+	start_mark := parser.mark
+	skip(parser)
+	end_mark := parser.mark
+
+	// Create the FLOW-SEQUENCE-END or FLOW-MAPPING-END token.
+	token := yaml_token_t{
+		typ:        typ,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+	}
+	// Append the token to the queue.
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the FLOW-ENTRY token.
+func yaml_parser_fetch_flow_entry(parser *yaml_parser_t) bool {
+	// Reset any potential simple keys on the current flow level.
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	// Simple keys are allowed after ','.
+	parser.simple_key_allowed = true
+
+	// Consume the token.
+	start_mark := parser.mark
+	skip(parser)
+	end_mark := parser.mark
+
+	// Create the FLOW-ENTRY token and append it to the queue.
+	token := yaml_token_t{
+		typ:        yaml_FLOW_ENTRY_TOKEN,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the BLOCK-ENTRY token.
+func yaml_parser_fetch_block_entry(parser *yaml_parser_t) bool {
+	// Check if the scanner is in the block context.
+	if parser.flow_level == 0 {
+		// Check if we are allowed to start a new entry.
+		if !parser.simple_key_allowed {
+			return yaml_parser_set_scanner_error(parser, "", parser.mark,
+				"block sequence entries are not allowed in this context")
+		}
+		// Add the BLOCK-SEQUENCE-START token if needed.
+		if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_SEQUENCE_START_TOKEN, parser.mark) {
+			return false
+		}
+	} else {
+		// It is an error for the '-' indicator to occur in the flow context,
+		// but we let the Parser detect and report it because the Parser
+		// is able to point to the context.
+	}
+
+	// Reset any potential simple keys on the current flow level.
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	// Simple keys are allowed after '-'.
+	parser.simple_key_allowed = true
+
+	// Consume the token.
+	start_mark := parser.mark
+	skip(parser)
+	end_mark := parser.mark
+
+	// Create the BLOCK-ENTRY token and append it to the queue.
+	token := yaml_token_t{
+		typ:        yaml_BLOCK_ENTRY_TOKEN,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the KEY token.
+func yaml_parser_fetch_key(parser *yaml_parser_t) bool {
+
+	// In the block context, additional checks are required.
+	if parser.flow_level == 0 {
+		// Check if we are allowed to start a new key (not necessarily simple).
+		if !parser.simple_key_allowed {
+			return yaml_parser_set_scanner_error(parser, "", parser.mark,
+				"mapping keys are not allowed in this context")
+		}
+		// Add the BLOCK-MAPPING-START token if needed.
+		if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) {
+			return false
+		}
+	}
+
+	// Reset any potential simple keys on the current flow level.
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	// Simple keys are allowed after '?' in the block context.
+	parser.simple_key_allowed = parser.flow_level == 0
+
+	// Consume the token.
+	start_mark := parser.mark
+	skip(parser)
+	end_mark := parser.mark
+
+	// Create the KEY token and append it to the queue.
+	token := yaml_token_t{
+		typ:        yaml_KEY_TOKEN,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the VALUE token.
+func yaml_parser_fetch_value(parser *yaml_parser_t) bool {
+
+	simple_key := &parser.simple_keys[len(parser.simple_keys)-1]
+
+	// Have we found a simple key?
+	if valid, ok := yaml_simple_key_is_valid(parser, simple_key); !ok {
+		return false
+
+	} else if valid {
+
+		// Create the KEY token and insert it into the queue.
+		token := yaml_token_t{
+			typ:        yaml_KEY_TOKEN,
+			start_mark: simple_key.mark,
+			end_mark:   simple_key.mark,
+		}
+		yaml_insert_token(parser, simple_key.token_number-parser.tokens_parsed, &token)
+
+		// In the block context, we may need to add the BLOCK-MAPPING-START token.
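+		// (This is how the first key of a block mapping produces the
+		// BLOCK-MAPPING-START token: the indentation increase at the key's
+		// column has not been rolled yet.)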
+ if !yaml_parser_roll_indent(parser, simple_key.mark.column, + simple_key.token_number, + yaml_BLOCK_MAPPING_START_TOKEN, simple_key.mark) { + return false + } + + // Remove the simple key. + simple_key.possible = false + delete(parser.simple_keys_by_tok, simple_key.token_number) + + // A simple key cannot follow another simple key. + parser.simple_key_allowed = false + + } else { + // The ':' indicator follows a complex key. + + // In the block context, extra checks are required. + if parser.flow_level == 0 { + + // Check if we are allowed to start a complex value. + if !parser.simple_key_allowed { + return yaml_parser_set_scanner_error(parser, "", parser.mark, + "mapping values are not allowed in this context") + } + + // Add the BLOCK-MAPPING-START token if needed. + if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) { + return false + } + } + + // Simple keys after ':' are allowed in the block context. + parser.simple_key_allowed = parser.flow_level == 0 + } + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the VALUE token and append it to the queue. + token := yaml_token_t{ + typ: yaml_VALUE_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the ALIAS or ANCHOR token. +func yaml_parser_fetch_anchor(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // An anchor or an alias could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow an anchor or an alias. + parser.simple_key_allowed = false + + // Create the ALIAS or ANCHOR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_anchor(parser, &token, typ) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the TAG token. +func yaml_parser_fetch_tag(parser *yaml_parser_t) bool { + // A tag could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow a tag. + parser.simple_key_allowed = false + + // Create the TAG token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_tag(parser, &token) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the SCALAR(...,literal) or SCALAR(...,folded) tokens. +func yaml_parser_fetch_block_scalar(parser *yaml_parser_t, literal bool) bool { + // Remove any potential simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // A simple key may follow a block scalar. + parser.simple_key_allowed = true + + // Create the SCALAR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_block_scalar(parser, &token, literal) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the SCALAR(...,single-quoted) or SCALAR(...,double-quoted) tokens. +func yaml_parser_fetch_flow_scalar(parser *yaml_parser_t, single bool) bool { + // A plain scalar could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow a flow scalar. + parser.simple_key_allowed = false + + // Create the SCALAR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_flow_scalar(parser, &token, single) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the SCALAR(...,plain) token. 
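+//
+// For example, in the document `key: value`, both `key` and `value` reach
+// the scanner through this path and are emitted as SCALAR(...,plain) tokens.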
+func yaml_parser_fetch_plain_scalar(parser *yaml_parser_t) bool {
+	// A plain scalar could be a simple key.
+	if !yaml_parser_save_simple_key(parser) {
+		return false
+	}
+
+	// A simple key cannot follow a plain scalar.
+	parser.simple_key_allowed = false
+
+	// Create the SCALAR token and append it to the queue.
+	var token yaml_token_t
+	if !yaml_parser_scan_plain_scalar(parser, &token) {
+		return false
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Eat whitespaces and comments until the next token is found.
+func yaml_parser_scan_to_next_token(parser *yaml_parser_t) bool {
+
+	scan_mark := parser.mark
+
+	// Until the next token is found.
+	for {
+		// Allow the BOM mark to start a line.
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+		if parser.mark.column == 0 && is_bom(parser.buffer, parser.buffer_pos) {
+			skip(parser)
+		}
+
+		// Eat whitespaces.
+		// Tabs are allowed:
+		//  - in the flow context
+		//  - in the block context, but not at the beginning of the line or
+		//    after '-', '?', or ':' (complex value).
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+
+		for parser.buffer[parser.buffer_pos] == ' ' || ((parser.flow_level > 0 || !parser.simple_key_allowed) && parser.buffer[parser.buffer_pos] == '\t') {
+			skip(parser)
+			if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+				return false
+			}
+		}
+
+		// Check if we just had a line comment under a sequence entry that
+		// looks more like a header to the following content. Similar to this:
+		//
+		//     - # The comment
+		//       - Some data
+		//
+		// If so, transform the line comment to a head comment and reposition.
+		if len(parser.comments) > 0 && len(parser.tokens) > 1 {
+			tokenA := parser.tokens[len(parser.tokens)-2]
+			tokenB := parser.tokens[len(parser.tokens)-1]
+			comment := &parser.comments[len(parser.comments)-1]
+			if tokenA.typ == yaml_BLOCK_SEQUENCE_START_TOKEN && tokenB.typ == yaml_BLOCK_ENTRY_TOKEN && len(comment.line) > 0 && !is_break(parser.buffer, parser.buffer_pos) {
+				// If it was in the prior line, reposition so it becomes a
+				// header of the follow up token. Otherwise, keep it in place
+				// so it becomes a header of the former.
+				comment.head = comment.line
+				comment.line = nil
+				if comment.start_mark.line == parser.mark.line-1 {
+					comment.token_mark = parser.mark
+				}
+			}
+		}
+
+		// Eat a comment until a line break.
+		if parser.buffer[parser.buffer_pos] == '#' {
+			if !yaml_parser_scan_comments(parser, scan_mark) {
+				return false
+			}
+		}
+
+		// If it is a line break, eat it.
+		if is_break(parser.buffer, parser.buffer_pos) {
+			if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+				return false
+			}
+			skip_line(parser)
+
+			// In the block context, a new line may start a simple key.
+			if parser.flow_level == 0 {
+				parser.simple_key_allowed = true
+			}
+		} else {
+			break // We have found a token.
+		}
+	}
+
+	return true
+}
+
+// Scan a YAML-DIRECTIVE or TAG-DIRECTIVE token.
+//
+// Scope:
+//
+//      %YAML   1.1     # a comment \n
+//      ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+//      %TAG    !yaml!  tag:yaml.org,2002:  \n
+//      ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+func yaml_parser_scan_directive(parser *yaml_parser_t, token *yaml_token_t) bool {
+	// Eat '%'.
+	start_mark := parser.mark
+	skip(parser)
+
+	// Scan the directive name.
+	var name []byte
+	if !yaml_parser_scan_directive_name(parser, start_mark, &name) {
+		return false
+	}
+
+	// Is it a YAML directive?
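+	// For example, the directive line `%YAML 1.1` produces the token
+	// VERSION-DIRECTIVE(1,1), as shown in the overview at the top of this
+	// file.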
+	if bytes.Equal(name, []byte("YAML")) {
+		// Scan the VERSION directive value.
+		var major, minor int8
+		if !yaml_parser_scan_version_directive_value(parser, start_mark, &major, &minor) {
+			return false
+		}
+		end_mark := parser.mark
+
+		// Create a VERSION-DIRECTIVE token.
+		*token = yaml_token_t{
+			typ:        yaml_VERSION_DIRECTIVE_TOKEN,
+			start_mark: start_mark,
+			end_mark:   end_mark,
+			major:      major,
+			minor:      minor,
+		}
+
+		// Is it a TAG directive?
+	} else if bytes.Equal(name, []byte("TAG")) {
+		// Scan the TAG directive value.
+		var handle, prefix []byte
+		if !yaml_parser_scan_tag_directive_value(parser, start_mark, &handle, &prefix) {
+			return false
+		}
+		end_mark := parser.mark
+
+		// Create a TAG-DIRECTIVE token.
+		*token = yaml_token_t{
+			typ:        yaml_TAG_DIRECTIVE_TOKEN,
+			start_mark: start_mark,
+			end_mark:   end_mark,
+			value:      handle,
+			prefix:     prefix,
+		}
+
+		// Unknown directive.
+	} else {
+		yaml_parser_set_scanner_error(parser, "while scanning a directive",
+			start_mark, "found unknown directive name")
+		return false
+	}
+
+	// Eat the rest of the line including any comments.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+
+	for is_blank(parser.buffer, parser.buffer_pos) {
+		skip(parser)
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+	}
+
+	if parser.buffer[parser.buffer_pos] == '#' {
+		// [Go] Discard this inline comment for the time being.
+		//if !yaml_parser_scan_line_comment(parser, start_mark) {
+		//	return false
+		//}
+		for !is_breakz(parser.buffer, parser.buffer_pos) {
+			skip(parser)
+			if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+				return false
+			}
+		}
+	}
+
+	// Check if we are at the end of the line.
+	if !is_breakz(parser.buffer, parser.buffer_pos) {
+		yaml_parser_set_scanner_error(parser, "while scanning a directive",
+			start_mark, "did not find expected comment or line break")
+		return false
+	}
+
+	// Eat a line break.
+	if is_break(parser.buffer, parser.buffer_pos) {
+		if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+			return false
+		}
+		skip_line(parser)
+	}
+
+	return true
+}
+
+// Scan the directive name.
+//
+// Scope:
+//
+//      %YAML   1.1     # a comment \n
+//       ^^^^
+//      %TAG    !yaml!  tag:yaml.org,2002:  \n
+//       ^^^
+func yaml_parser_scan_directive_name(parser *yaml_parser_t, start_mark yaml_mark_t, name *[]byte) bool {
+	// Consume the directive name.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+
+	var s []byte
+	for is_alpha(parser.buffer, parser.buffer_pos) {
+		s = read(parser, s)
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+	}
+
+	// Check if the name is empty.
+	if len(s) == 0 {
+		yaml_parser_set_scanner_error(parser, "while scanning a directive",
+			start_mark, "could not find expected directive name")
+		return false
+	}
+
+	// Check for a blank character after the name.
+	if !is_blankz(parser.buffer, parser.buffer_pos) {
+		yaml_parser_set_scanner_error(parser, "while scanning a directive",
+			start_mark, "found unexpected non-alphabetical character")
+		return false
+	}
+	*name = s
+	return true
+}
+
+// Scan the value of VERSION-DIRECTIVE.
+//
+// Scope:
+//
+//      %YAML   1.1     # a comment \n
+//           ^^^^^^
+func yaml_parser_scan_version_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, major, minor *int8) bool {
+	// Eat whitespaces.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Consume the major version number. + if !yaml_parser_scan_version_directive_number(parser, start_mark, major) { + return false + } + + // Eat '.'. + if parser.buffer[parser.buffer_pos] != '.' { + return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", + start_mark, "did not find expected digit or '.' character") + } + + skip(parser) + + // Consume the minor version number. + if !yaml_parser_scan_version_directive_number(parser, start_mark, minor) { + return false + } + return true +} + +const max_number_length = 2 + +// Scan the version number of VERSION-DIRECTIVE. +// +// Scope: +// +// %YAML 1.1 # a comment \n +// ^ +// %YAML 1.1 # a comment \n +// ^ +func yaml_parser_scan_version_directive_number(parser *yaml_parser_t, start_mark yaml_mark_t, number *int8) bool { + + // Repeat while the next character is digit. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + var value, length int8 + for is_digit(parser.buffer, parser.buffer_pos) { + // Check if the number is too long. + length++ + if length > max_number_length { + return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", + start_mark, "found extremely long version number") + } + value = value*10 + int8(as_digit(parser.buffer, parser.buffer_pos)) + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if the number was present. + if length == 0 { + return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", + start_mark, "did not find expected version number") + } + *number = value + return true +} + +// Scan the value of a TAG-DIRECTIVE token. +// +// Scope: +// +// %TAG !yaml! tag:yaml.org,2002: \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +func yaml_parser_scan_tag_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, handle, prefix *[]byte) bool { + var handle_value, prefix_value []byte + + // Eat whitespaces. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Scan a handle. + if !yaml_parser_scan_tag_handle(parser, true, start_mark, &handle_value) { + return false + } + + // Expect a whitespace. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if !is_blank(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive", + start_mark, "did not find expected whitespace") + return false + } + + // Eat whitespaces. + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Scan a prefix. + if !yaml_parser_scan_tag_uri(parser, true, nil, start_mark, &prefix_value) { + return false + } + + // Expect a whitespace or line break. 
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if !is_blankz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive", + start_mark, "did not find expected whitespace or line break") + return false + } + + *handle = handle_value + *prefix = prefix_value + return true +} + +func yaml_parser_scan_anchor(parser *yaml_parser_t, token *yaml_token_t, typ yaml_token_type_t) bool { + var s []byte + + // Eat the indicator character. + start_mark := parser.mark + skip(parser) + + // Consume the value. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_alpha(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + end_mark := parser.mark + + /* + * Check if length of the anchor is greater than 0 and it is followed by + * a whitespace character or one of the indicators: + * + * '?', ':', ',', ']', '}', '%', '@', '`'. + */ + + if len(s) == 0 || + !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '?' || + parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == ',' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '}' || + parser.buffer[parser.buffer_pos] == '%' || parser.buffer[parser.buffer_pos] == '@' || + parser.buffer[parser.buffer_pos] == '`') { + context := "while scanning an alias" + if typ == yaml_ANCHOR_TOKEN { + context = "while scanning an anchor" + } + yaml_parser_set_scanner_error(parser, context, start_mark, + "did not find expected alphabetic or numeric character") + return false + } + + // Create a token. + *token = yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + value: s, + } + + return true +} + +/* + * Scan a TAG token. + */ + +func yaml_parser_scan_tag(parser *yaml_parser_t, token *yaml_token_t) bool { + var handle, suffix []byte + + start_mark := parser.mark + + // Check if the tag is in the canonical form. + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + if parser.buffer[parser.buffer_pos+1] == '<' { + // Keep the handle as '' + + // Eat '!<' + skip(parser) + skip(parser) + + // Consume the tag value. + if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) { + return false + } + + // Check for '>' and eat it. + if parser.buffer[parser.buffer_pos] != '>' { + yaml_parser_set_scanner_error(parser, "while scanning a tag", + start_mark, "did not find the expected '>'") + return false + } + + skip(parser) + } else { + // The tag has either the '!suffix' or the '!handle!suffix' form. + + // First, try to scan a handle. + if !yaml_parser_scan_tag_handle(parser, false, start_mark, &handle) { + return false + } + + // Check if it is, indeed, handle. + if handle[0] == '!' && len(handle) > 1 && handle[len(handle)-1] == '!' { + // Scan the suffix now. + if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) { + return false + } + } else { + // It wasn't a handle after all. Scan the rest of the tag. + if !yaml_parser_scan_tag_uri(parser, false, handle, start_mark, &suffix) { + return false + } + + // Set the handle to '!'. + handle = []byte{'!'} + + // A special case: the '!' tag. Set the handle to '' and the + // suffix to '!'. + if len(suffix) == 0 { + handle, suffix = suffix, handle + } + } + } + + // Check the character which ends the tag. 
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if !is_blankz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a tag", + start_mark, "did not find expected whitespace or line break") + return false + } + + end_mark := parser.mark + + // Create a token. + *token = yaml_token_t{ + typ: yaml_TAG_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: handle, + suffix: suffix, + } + return true +} + +// Scan a tag handle. +func yaml_parser_scan_tag_handle(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, handle *[]byte) bool { + // Check the initial '!' character. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if parser.buffer[parser.buffer_pos] != '!' { + yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find expected '!'") + return false + } + + var s []byte + + // Copy the '!' character. + s = read(parser, s) + + // Copy all subsequent alphabetical and numerical characters. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for is_alpha(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if the trailing character is '!' and copy it. + if parser.buffer[parser.buffer_pos] == '!' { + s = read(parser, s) + } else { + // It's either the '!' tag or not really a tag handle. If it's a %TAG + // directive, it's an error. If it's a tag token, it must be a part of URI. + if directive && string(s) != "!" { + yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find expected '!'") + return false + } + } + + *handle = s + return true +} + +// Scan a tag. +func yaml_parser_scan_tag_uri(parser *yaml_parser_t, directive bool, head []byte, start_mark yaml_mark_t, uri *[]byte) bool { + //size_t length = head ? strlen((char *)head) : 0 + var s []byte + hasTag := len(head) > 0 + + // Copy the head if needed. + // + // Note that we don't copy the leading '!' character. + if len(head) > 1 { + s = append(s, head[1:]...) + } + + // Scan the tag. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // The set of characters that may appear in URI is as follows: + // + // '0'-'9', 'A'-'Z', 'a'-'z', '_', '-', ';', '/', '?', ':', '@', '&', + // '=', '+', '$', ',', '.', '!', '~', '*', '\'', '(', ')', '[', ']', + // '%'. + // [Go] TODO Convert this into more reasonable logic. + for is_alpha(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == ';' || + parser.buffer[parser.buffer_pos] == '/' || parser.buffer[parser.buffer_pos] == '?' || + parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == '@' || + parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '=' || + parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '$' || + parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '.' || + parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '~' || + parser.buffer[parser.buffer_pos] == '*' || parser.buffer[parser.buffer_pos] == '\'' || + parser.buffer[parser.buffer_pos] == '(' || parser.buffer[parser.buffer_pos] == ')' || + parser.buffer[parser.buffer_pos] == '[' || parser.buffer[parser.buffer_pos] == ']' || + parser.buffer[parser.buffer_pos] == '%' { + // Check if it is a URI-escape sequence. 
+ if parser.buffer[parser.buffer_pos] == '%' { + if !yaml_parser_scan_uri_escapes(parser, directive, start_mark, &s) { + return false + } + } else { + s = read(parser, s) + } + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + hasTag = true + } + + if !hasTag { + yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find expected tag URI") + return false + } + *uri = s + return true +} + +// Decode an URI-escape sequence corresponding to a single UTF-8 character. +func yaml_parser_scan_uri_escapes(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, s *[]byte) bool { + + // Decode the required number of characters. + w := 1024 + for w > 0 { + // Check for a URI-escaped octet. + if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) { + return false + } + + if !(parser.buffer[parser.buffer_pos] == '%' && + is_hex(parser.buffer, parser.buffer_pos+1) && + is_hex(parser.buffer, parser.buffer_pos+2)) { + return yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find URI escaped octet") + } + + // Get the octet. + octet := byte((as_hex(parser.buffer, parser.buffer_pos+1) << 4) + as_hex(parser.buffer, parser.buffer_pos+2)) + + // If it is the leading octet, determine the length of the UTF-8 sequence. + if w == 1024 { + w = width(octet) + if w == 0 { + return yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "found an incorrect leading UTF-8 octet") + } + } else { + // Check if the trailing octet is correct. + if octet&0xC0 != 0x80 { + return yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "found an incorrect trailing UTF-8 octet") + } + } + + // Copy the octet and move the pointers. + *s = append(*s, octet) + skip(parser) + skip(parser) + skip(parser) + w-- + } + return true +} + +// Scan a block scalar. +func yaml_parser_scan_block_scalar(parser *yaml_parser_t, token *yaml_token_t, literal bool) bool { + // Eat the indicator '|' or '>'. + start_mark := parser.mark + skip(parser) + + // Scan the additional block scalar indicators. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // Check for a chomping indicator. + var chomping, increment int + if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' { + // Set the chomping method and eat the indicator. + if parser.buffer[parser.buffer_pos] == '+' { + chomping = +1 + } else { + chomping = -1 + } + skip(parser) + + // Check for an indentation indicator. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if is_digit(parser.buffer, parser.buffer_pos) { + // Check that the indentation is greater than 0. + if parser.buffer[parser.buffer_pos] == '0' { + yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "found an indentation indicator equal to 0") + return false + } + + // Get the indentation level and eat the indicator. + increment = as_digit(parser.buffer, parser.buffer_pos) + skip(parser) + } + + } else if is_digit(parser.buffer, parser.buffer_pos) { + // Do the same as above, but in the opposite order. 
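The %-escape validation in yaml_parser_scan_uri_escapes can likewise be sketched on its own: the leading octet fixes the UTF-8 sequence width, and every continuation octet must match the 10xxxxxx bit pattern. decodeURIEscapes below is a hypothetical illustration, not library API.

package main

import (
	"fmt"
	"strconv"
)

// decodeURIEscapes hex-decodes a run of %XX octets and applies the same
// UTF-8 sanity checks as the scanner: the first octet determines the
// sequence width, and each continuation octet must look like 10xxxxxx.
func decodeURIEscapes(in string) ([]byte, error) {
	var out []byte
	w := 0
	for len(in) >= 3 && in[0] == '%' {
		v, err := strconv.ParseUint(in[1:3], 16, 8)
		if err != nil {
			return nil, fmt.Errorf("did not find URI escaped octet")
		}
		octet := byte(v)
		if w == 0 {
			switch {
			case octet&0x80 == 0x00:
				w = 1
			case octet&0xE0 == 0xC0:
				w = 2
			case octet&0xF0 == 0xE0:
				w = 3
			case octet&0xF8 == 0xF0:
				w = 4
			default:
				return nil, fmt.Errorf("found an incorrect leading UTF-8 octet")
			}
		} else if octet&0xC0 != 0x80 {
			return nil, fmt.Errorf("found an incorrect trailing UTF-8 octet")
		}
		out = append(out, octet)
		in = in[3:]
		w--
	}
	return out, nil
}

func main() {
	b, err := decodeURIEscapes("%C3%A9")
	fmt.Println(string(b), err) // é <nil>
}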
+ + if parser.buffer[parser.buffer_pos] == '0' { + yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "found an indentation indicator equal to 0") + return false + } + increment = as_digit(parser.buffer, parser.buffer_pos) + skip(parser) + + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' { + if parser.buffer[parser.buffer_pos] == '+' { + chomping = +1 + } else { + chomping = -1 + } + skip(parser) + } + } + + // Eat whitespaces and comments to the end of the line. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + if parser.buffer[parser.buffer_pos] == '#' { + if !yaml_parser_scan_line_comment(parser, start_mark) { + return false + } + for !is_breakz(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + } + + // Check if we are at the end of the line. + if !is_breakz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "did not find expected comment or line break") + return false + } + + // Eat a line break. + if is_break(parser.buffer, parser.buffer_pos) { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + } + + end_mark := parser.mark + + // Set the indentation level if it was specified. + var indent int + if increment > 0 { + if parser.indent >= 0 { + indent = parser.indent + increment + } else { + indent = increment + } + } + + // Scan the leading line breaks and determine the indentation level if needed. + var s, leading_break, trailing_breaks []byte + if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) { + return false + } + + // Scan the block scalar content. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + var leading_blank, trailing_blank bool + for parser.mark.column == indent && !is_z(parser.buffer, parser.buffer_pos) { + // We are at the beginning of a non-empty line. + + // Is it a trailing whitespace? + trailing_blank = is_blank(parser.buffer, parser.buffer_pos) + + // Check if we need to fold the leading line break. + if !literal && !leading_blank && !trailing_blank && len(leading_break) > 0 && leading_break[0] == '\n' { + // Do we need to join the lines by space? + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } + } else { + s = append(s, leading_break...) + } + leading_break = leading_break[:0] + + // Append the remaining line breaks. + s = append(s, trailing_breaks...) + trailing_breaks = trailing_breaks[:0] + + // Is it a leading whitespace? + leading_blank = is_blank(parser.buffer, parser.buffer_pos) + + // Consume the current line. + for !is_breakz(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Consume the line break. + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + leading_break = read_line(parser, leading_break) + + // Eat the following indentation spaces and line breaks. 
+ if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) { + return false + } + } + + // Chomp the tail. + if chomping != -1 { + s = append(s, leading_break...) + } + if chomping == 1 { + s = append(s, trailing_breaks...) + } + + // Create a token. + *token = yaml_token_t{ + typ: yaml_SCALAR_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: s, + style: yaml_LITERAL_SCALAR_STYLE, + } + if !literal { + token.style = yaml_FOLDED_SCALAR_STYLE + } + return true +} + +// Scan indentation spaces and line breaks for a block scalar. Determine the +// indentation level if needed. +func yaml_parser_scan_block_scalar_breaks(parser *yaml_parser_t, indent *int, breaks *[]byte, start_mark yaml_mark_t, end_mark *yaml_mark_t) bool { + *end_mark = parser.mark + + // Eat the indentation spaces and line breaks. + max_indent := 0 + for { + // Eat the indentation spaces. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for (*indent == 0 || parser.mark.column < *indent) && is_space(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + if parser.mark.column > max_indent { + max_indent = parser.mark.column + } + + // Check for a tab character messing the indentation. + if (*indent == 0 || parser.mark.column < *indent) && is_tab(parser.buffer, parser.buffer_pos) { + return yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "found a tab character where an indentation space is expected") + } + + // Have we found a non-empty line? + if !is_break(parser.buffer, parser.buffer_pos) { + break + } + + // Consume the line break. + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + // [Go] Should really be returning breaks instead. + *breaks = read_line(parser, *breaks) + *end_mark = parser.mark + } + + // Determine the indentation level if needed. + if *indent == 0 { + *indent = max_indent + if *indent < parser.indent+1 { + *indent = parser.indent + 1 + } + if *indent < 1 { + *indent = 1 + } + } + return true +} + +// Scan a quoted scalar. +func yaml_parser_scan_flow_scalar(parser *yaml_parser_t, token *yaml_token_t, single bool) bool { + // Eat the left quote. + start_mark := parser.mark + skip(parser) + + // Consume the content of the quoted scalar. + var s, leading_break, trailing_breaks, whitespaces []byte + for { + // Check that there are no document indicators at the beginning of the line. + if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { + return false + } + + if parser.mark.column == 0 && + ((parser.buffer[parser.buffer_pos+0] == '-' && + parser.buffer[parser.buffer_pos+1] == '-' && + parser.buffer[parser.buffer_pos+2] == '-') || + (parser.buffer[parser.buffer_pos+0] == '.' && + parser.buffer[parser.buffer_pos+1] == '.' && + parser.buffer[parser.buffer_pos+2] == '.')) && + is_blankz(parser.buffer, parser.buffer_pos+3) { + yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar", + start_mark, "found unexpected document indicator") + return false + } + + // Check for EOF. + if is_z(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar", + start_mark, "found unexpected end of stream") + return false + } + + // Consume non-blank characters. 
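Stepping out of the scanner for a moment: the chomping and folding rules implemented above are easiest to observe through the public API. A small usage sketch, with the expected values following the YAML spec:

package main

import (
	"fmt"

	"go.yaml.in/yaml/v3"
)

func main() {
	var v struct {
		Lit   string `yaml:"lit"`   // literal: line breaks kept
		Fold  string `yaml:"fold"`  // folded: line breaks become spaces
		Strip string `yaml:"strip"` // "-" chomping: final break removed
	}
	data := []byte("lit: |\n  a\n  b\nfold: >\n  a\n  b\nstrip: |-\n  a\n  b\n")
	if err := yaml.Unmarshal(data, &v); err != nil {
		panic(err)
	}
	fmt.Printf("%q %q %q\n", v.Lit, v.Fold, v.Strip)
	// Expected: "a\nb\n" "a b\n" "a\nb"
}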
+ leading_blanks := false
+ for !is_blankz(parser.buffer, parser.buffer_pos) {
+ if single && parser.buffer[parser.buffer_pos] == '\'' && parser.buffer[parser.buffer_pos+1] == '\'' {
+ // It is an escaped single quote.
+ s = append(s, '\'')
+ skip(parser)
+ skip(parser)
+
+ } else if single && parser.buffer[parser.buffer_pos] == '\'' {
+ // It is a right single quote.
+ break
+ } else if !single && parser.buffer[parser.buffer_pos] == '"' {
+ // It is a right double quote.
+ break
+
+ } else if !single && parser.buffer[parser.buffer_pos] == '\\' && is_break(parser.buffer, parser.buffer_pos+1) {
+ // It is an escaped line break.
+ if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) {
+ return false
+ }
+ skip(parser)
+ skip_line(parser)
+ leading_blanks = true
+ break
+
+ } else if !single && parser.buffer[parser.buffer_pos] == '\\' {
+ // It is an escape sequence.
+ code_length := 0
+
+ // Check the escape character.
+ switch parser.buffer[parser.buffer_pos+1] {
+ case '0':
+ s = append(s, 0)
+ case 'a':
+ s = append(s, '\x07')
+ case 'b':
+ s = append(s, '\x08')
+ case 't', '\t':
+ s = append(s, '\x09')
+ case 'n':
+ s = append(s, '\x0A')
+ case 'v':
+ s = append(s, '\x0B')
+ case 'f':
+ s = append(s, '\x0C')
+ case 'r':
+ s = append(s, '\x0D')
+ case 'e':
+ s = append(s, '\x1B')
+ case ' ':
+ s = append(s, '\x20')
+ case '"':
+ s = append(s, '"')
+ case '\'':
+ s = append(s, '\'')
+ case '\\':
+ s = append(s, '\\')
+ case 'N': // NEL (#x85)
+ s = append(s, '\xC2')
+ s = append(s, '\x85')
+ case '_': // #xA0
+ s = append(s, '\xC2')
+ s = append(s, '\xA0')
+ case 'L': // LS (#x2028)
+ s = append(s, '\xE2')
+ s = append(s, '\x80')
+ s = append(s, '\xA8')
+ case 'P': // PS (#x2029)
+ s = append(s, '\xE2')
+ s = append(s, '\x80')
+ s = append(s, '\xA9')
+ case 'x':
+ code_length = 2
+ case 'u':
+ code_length = 4
+ case 'U':
+ code_length = 8
+ default:
+ yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
+ start_mark, "found unknown escape character")
+ return false
+ }
+
+ skip(parser)
+ skip(parser)
+
+ // Consume an arbitrary escape code.
+ if code_length > 0 {
+ var value int
+
+ // Scan the character value.
+ if parser.unread < code_length && !yaml_parser_update_buffer(parser, code_length) {
+ return false
+ }
+ for k := 0; k < code_length; k++ {
+ if !is_hex(parser.buffer, parser.buffer_pos+k) {
+ yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
+ start_mark, "did not find expected hexdecimal number")
+ return false
+ }
+ value = (value << 4) + as_hex(parser.buffer, parser.buffer_pos+k)
+ }
+
+ // Check the value and write the character.
+ if (value >= 0xD800 && value <= 0xDFFF) || value > 0x10FFFF {
+ yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
+ start_mark, "found invalid Unicode character escape code")
+ return false
+ }
+ if value <= 0x7F {
+ s = append(s, byte(value))
+ } else if value <= 0x7FF {
+ s = append(s, byte(0xC0+(value>>6)))
+ s = append(s, byte(0x80+(value&0x3F)))
+ } else if value <= 0xFFFF {
+ s = append(s, byte(0xE0+(value>>12)))
+ s = append(s, byte(0x80+((value>>6)&0x3F)))
+ s = append(s, byte(0x80+(value&0x3F)))
+ } else {
+ s = append(s, byte(0xF0+(value>>18)))
+ s = append(s, byte(0x80+((value>>12)&0x3F)))
+ s = append(s, byte(0x80+((value>>6)&0x3F)))
+ s = append(s, byte(0x80+(value&0x3F)))
+ }
+
+ // Advance the pointer.
+ for k := 0; k < code_length; k++ {
+ skip(parser)
+ }
+ }
+ } else {
+ // It is a non-escaped non-blank character.
+ s = read(parser, s) + } + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + } + + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // Check if we are at the end of the scalar. + if single { + if parser.buffer[parser.buffer_pos] == '\'' { + break + } + } else { + if parser.buffer[parser.buffer_pos] == '"' { + break + } + } + + // Consume blank characters. + for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) { + if is_blank(parser.buffer, parser.buffer_pos) { + // Consume a space or a tab character. + if !leading_blanks { + whitespaces = read(parser, whitespaces) + } else { + skip(parser) + } + } else { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + // Check if it is a first line break. + if !leading_blanks { + whitespaces = whitespaces[:0] + leading_break = read_line(parser, leading_break) + leading_blanks = true + } else { + trailing_breaks = read_line(parser, trailing_breaks) + } + } + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Join the whitespaces or fold line breaks. + if leading_blanks { + // Do we need to fold line breaks? + if len(leading_break) > 0 && leading_break[0] == '\n' { + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } else { + s = append(s, trailing_breaks...) + } + } else { + s = append(s, leading_break...) + s = append(s, trailing_breaks...) + } + trailing_breaks = trailing_breaks[:0] + leading_break = leading_break[:0] + } else { + s = append(s, whitespaces...) + whitespaces = whitespaces[:0] + } + } + + // Eat the right quote. + skip(parser) + end_mark := parser.mark + + // Create a token. + *token = yaml_token_t{ + typ: yaml_SCALAR_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: s, + style: yaml_SINGLE_QUOTED_SCALAR_STYLE, + } + if !single { + token.style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + return true +} + +// Scan a plain scalar. +func yaml_parser_scan_plain_scalar(parser *yaml_parser_t, token *yaml_token_t) bool { + + var s, leading_break, trailing_breaks, whitespaces []byte + var leading_blanks bool + var indent = parser.indent + 1 + + start_mark := parser.mark + end_mark := parser.mark + + // Consume the content of the plain scalar. + for { + // Check for a document indicator. + if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { + return false + } + if parser.mark.column == 0 && + ((parser.buffer[parser.buffer_pos+0] == '-' && + parser.buffer[parser.buffer_pos+1] == '-' && + parser.buffer[parser.buffer_pos+2] == '-') || + (parser.buffer[parser.buffer_pos+0] == '.' && + parser.buffer[parser.buffer_pos+1] == '.' && + parser.buffer[parser.buffer_pos+2] == '.')) && + is_blankz(parser.buffer, parser.buffer_pos+3) { + break + } + + // Check for a comment. + if parser.buffer[parser.buffer_pos] == '#' { + break + } + + // Consume non-blank characters. + for !is_blankz(parser.buffer, parser.buffer_pos) { + + // Check for indicators that may end a plain scalar. + if (parser.buffer[parser.buffer_pos] == ':' && is_blankz(parser.buffer, parser.buffer_pos+1)) || + (parser.flow_level > 0 && + (parser.buffer[parser.buffer_pos] == ',' || + parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == '[' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' || + parser.buffer[parser.buffer_pos] == '}')) { + break + } + + // Check if we need to join whitespaces and breaks. 
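Similarly, the escape table for double-quoted scalars above (and the '' escape for single quotes) can be seen end to end from the public API; expected results are noted in comments:

package main

import (
	"fmt"

	"go.yaml.in/yaml/v3"
)

func main() {
	var m map[string]string
	// Double quotes get the full escape table above; single quotes only
	// support '' as an escaped quote.
	data := []byte("a: \"tab:\\t nel:\\N smiley:\\u263A\"\nb: 'it''s'\n")
	if err := yaml.Unmarshal(data, &m); err != nil {
		panic(err)
	}
	fmt.Printf("%q\n", m["a"]) // "tab:\t nel:\u0085 smiley:☺"
	fmt.Println(m["b"])        // it's
}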
+ if leading_blanks || len(whitespaces) > 0 { + if leading_blanks { + // Do we need to fold line breaks? + if leading_break[0] == '\n' { + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } else { + s = append(s, trailing_breaks...) + } + } else { + s = append(s, leading_break...) + s = append(s, trailing_breaks...) + } + trailing_breaks = trailing_breaks[:0] + leading_break = leading_break[:0] + leading_blanks = false + } else { + s = append(s, whitespaces...) + whitespaces = whitespaces[:0] + } + } + + // Copy the character. + s = read(parser, s) + + end_mark = parser.mark + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + } + + // Is it the end? + if !(is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos)) { + break + } + + // Consume blank characters. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) { + if is_blank(parser.buffer, parser.buffer_pos) { + + // Check for tab characters that abuse indentation. + if leading_blanks && parser.mark.column < indent && is_tab(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a plain scalar", + start_mark, "found a tab character that violates indentation") + return false + } + + // Consume a space or a tab character. + if !leading_blanks { + whitespaces = read(parser, whitespaces) + } else { + skip(parser) + } + } else { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + // Check if it is a first line break. + if !leading_blanks { + whitespaces = whitespaces[:0] + leading_break = read_line(parser, leading_break) + leading_blanks = true + } else { + trailing_breaks = read_line(parser, trailing_breaks) + } + } + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check indentation level. + if parser.flow_level == 0 && parser.mark.column < indent { + break + } + } + + // Create a token. + *token = yaml_token_t{ + typ: yaml_SCALAR_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: s, + style: yaml_PLAIN_SCALAR_STYLE, + } + + // Note that we change the 'simple_key_allowed' flag. 
+ if leading_blanks { + parser.simple_key_allowed = true + } + return true +} + +func yaml_parser_scan_line_comment(parser *yaml_parser_t, token_mark yaml_mark_t) bool { + if parser.newlines > 0 { + return true + } + + var start_mark yaml_mark_t + var text []byte + + for peek := 0; peek < 512; peek++ { + if parser.unread < peek+1 && !yaml_parser_update_buffer(parser, peek+1) { + break + } + if is_blank(parser.buffer, parser.buffer_pos+peek) { + continue + } + if parser.buffer[parser.buffer_pos+peek] == '#' { + seen := parser.mark.index + peek + for { + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if is_breakz(parser.buffer, parser.buffer_pos) { + if parser.mark.index >= seen { + break + } + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + } else if parser.mark.index >= seen { + if len(text) == 0 { + start_mark = parser.mark + } + text = read(parser, text) + } else { + skip(parser) + } + } + } + break + } + if len(text) > 0 { + parser.comments = append(parser.comments, yaml_comment_t{ + token_mark: token_mark, + start_mark: start_mark, + line: text, + }) + } + return true +} + +func yaml_parser_scan_comments(parser *yaml_parser_t, scan_mark yaml_mark_t) bool { + token := parser.tokens[len(parser.tokens)-1] + + if token.typ == yaml_FLOW_ENTRY_TOKEN && len(parser.tokens) > 1 { + token = parser.tokens[len(parser.tokens)-2] + } + + var token_mark = token.start_mark + var start_mark yaml_mark_t + var next_indent = parser.indent + if next_indent < 0 { + next_indent = 0 + } + + var recent_empty = false + var first_empty = parser.newlines <= 1 + + var line = parser.mark.line + var column = parser.mark.column + + var text []byte + + // The foot line is the place where a comment must start to + // still be considered as a foot of the prior content. + // If there's some content in the currently parsed line, then + // the foot is the line below it. + var foot_line = -1 + if scan_mark.line > 0 { + foot_line = parser.mark.line - parser.newlines + 1 + if parser.newlines == 0 && parser.mark.column > 1 { + foot_line++ + } + } + + var peek = 0 + for ; peek < 512; peek++ { + if parser.unread < peek+1 && !yaml_parser_update_buffer(parser, peek+1) { + break + } + column++ + if is_blank(parser.buffer, parser.buffer_pos+peek) { + continue + } + c := parser.buffer[parser.buffer_pos+peek] + var close_flow = parser.flow_level > 0 && (c == ']' || c == '}') + if close_flow || is_breakz(parser.buffer, parser.buffer_pos+peek) { + // Got line break or terminator. + if close_flow || !recent_empty { + if close_flow || first_empty && (start_mark.line == foot_line && token.typ != yaml_VALUE_TOKEN || start_mark.column-1 < next_indent) { + // This is the first empty line and there were no empty lines before, + // so this initial part of the comment is a foot of the prior token + // instead of being a head for the following one. Split it up. + // Alternatively, this might also be the last comment inside a flow + // scope, so it must be a footer. + if len(text) > 0 { + if start_mark.column-1 < next_indent { + // If dedented it's unrelated to the prior token. 
+ token_mark = start_mark + } + parser.comments = append(parser.comments, yaml_comment_t{ + scan_mark: scan_mark, + token_mark: token_mark, + start_mark: start_mark, + end_mark: yaml_mark_t{parser.mark.index + peek, line, column}, + foot: text, + }) + scan_mark = yaml_mark_t{parser.mark.index + peek, line, column} + token_mark = scan_mark + text = nil + } + } else { + if len(text) > 0 && parser.buffer[parser.buffer_pos+peek] != 0 { + text = append(text, '\n') + } + } + } + if !is_break(parser.buffer, parser.buffer_pos+peek) { + break + } + first_empty = false + recent_empty = true + column = 0 + line++ + continue + } + + if len(text) > 0 && (close_flow || column-1 < next_indent && column != start_mark.column) { + // The comment at the different indentation is a foot of the + // preceding data rather than a head of the upcoming one. + parser.comments = append(parser.comments, yaml_comment_t{ + scan_mark: scan_mark, + token_mark: token_mark, + start_mark: start_mark, + end_mark: yaml_mark_t{parser.mark.index + peek, line, column}, + foot: text, + }) + scan_mark = yaml_mark_t{parser.mark.index + peek, line, column} + token_mark = scan_mark + text = nil + } + + if parser.buffer[parser.buffer_pos+peek] != '#' { + break + } + + if len(text) == 0 { + start_mark = yaml_mark_t{parser.mark.index + peek, line, column} + } else { + text = append(text, '\n') + } + + recent_empty = false + + // Consume until after the consumed comment line. + seen := parser.mark.index + peek + for { + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if is_breakz(parser.buffer, parser.buffer_pos) { + if parser.mark.index >= seen { + break + } + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + } else if parser.mark.index >= seen { + text = read(parser, text) + } else { + skip(parser) + } + } + + peek = 0 + column = 0 + line = parser.mark.line + next_indent = parser.indent + if next_indent < 0 { + next_indent = 0 + } + } + + if len(text) > 0 { + parser.comments = append(parser.comments, yaml_comment_t{ + scan_mark: scan_mark, + token_mark: start_mark, + start_mark: start_mark, + end_mark: yaml_mark_t{parser.mark.index + peek - 1, line, column}, + head: text, + }) + } + return true +} diff --git a/vendor/go.yaml.in/yaml/v3/sorter.go b/vendor/go.yaml.in/yaml/v3/sorter.go new file mode 100644 index 0000000000..9210ece7e9 --- /dev/null +++ b/vendor/go.yaml.in/yaml/v3/sorter.go @@ -0,0 +1,134 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
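sorter.go, added next, supplies the natural ordering the encoder applies to map keys: digit runs compare by numeric value rather than lexically. A short usage sketch, with the output hedged as expected under that ordering:

package main

import (
	"fmt"

	"go.yaml.in/yaml/v3"
)

func main() {
	out, err := yaml.Marshal(map[string]int{"item10": 3, "item2": 2, "alpha": 1})
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out))
	// Expected (natural, not lexical, ordering):
	// alpha: 1
	// item2: 2
	// item10: 3
}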
+ +package yaml + +import ( + "reflect" + "unicode" +) + +type keyList []reflect.Value + +func (l keyList) Len() int { return len(l) } +func (l keyList) Swap(i, j int) { l[i], l[j] = l[j], l[i] } +func (l keyList) Less(i, j int) bool { + a := l[i] + b := l[j] + ak := a.Kind() + bk := b.Kind() + for (ak == reflect.Interface || ak == reflect.Ptr) && !a.IsNil() { + a = a.Elem() + ak = a.Kind() + } + for (bk == reflect.Interface || bk == reflect.Ptr) && !b.IsNil() { + b = b.Elem() + bk = b.Kind() + } + af, aok := keyFloat(a) + bf, bok := keyFloat(b) + if aok && bok { + if af != bf { + return af < bf + } + if ak != bk { + return ak < bk + } + return numLess(a, b) + } + if ak != reflect.String || bk != reflect.String { + return ak < bk + } + ar, br := []rune(a.String()), []rune(b.String()) + digits := false + for i := 0; i < len(ar) && i < len(br); i++ { + if ar[i] == br[i] { + digits = unicode.IsDigit(ar[i]) + continue + } + al := unicode.IsLetter(ar[i]) + bl := unicode.IsLetter(br[i]) + if al && bl { + return ar[i] < br[i] + } + if al || bl { + if digits { + return al + } else { + return bl + } + } + var ai, bi int + var an, bn int64 + if ar[i] == '0' || br[i] == '0' { + for j := i - 1; j >= 0 && unicode.IsDigit(ar[j]); j-- { + if ar[j] != '0' { + an = 1 + bn = 1 + break + } + } + } + for ai = i; ai < len(ar) && unicode.IsDigit(ar[ai]); ai++ { + an = an*10 + int64(ar[ai]-'0') + } + for bi = i; bi < len(br) && unicode.IsDigit(br[bi]); bi++ { + bn = bn*10 + int64(br[bi]-'0') + } + if an != bn { + return an < bn + } + if ai != bi { + return ai < bi + } + return ar[i] < br[i] + } + return len(ar) < len(br) +} + +// keyFloat returns a float value for v if it is a number/bool +// and whether it is a number/bool or not. +func keyFloat(v reflect.Value) (f float64, ok bool) { + switch v.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return float64(v.Int()), true + case reflect.Float32, reflect.Float64: + return v.Float(), true + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return float64(v.Uint()), true + case reflect.Bool: + if v.Bool() { + return 1, true + } + return 0, true + } + return 0, false +} + +// numLess returns whether a < b. +// a and b must necessarily have the same kind. 
+func numLess(a, b reflect.Value) bool { + switch a.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return a.Int() < b.Int() + case reflect.Float32, reflect.Float64: + return a.Float() < b.Float() + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return a.Uint() < b.Uint() + case reflect.Bool: + return !a.Bool() && b.Bool() + } + panic("not a number") +} diff --git a/vendor/go.yaml.in/yaml/v3/writerc.go b/vendor/go.yaml.in/yaml/v3/writerc.go new file mode 100644 index 0000000000..266d0b092c --- /dev/null +++ b/vendor/go.yaml.in/yaml/v3/writerc.go @@ -0,0 +1,48 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// Copyright (c) 2006-2010 Kirill Simonov +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +// of the Software, and to permit persons to whom the Software is furnished to do +// so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +package yaml + +// Set the writer error and return false. +func yaml_emitter_set_writer_error(emitter *yaml_emitter_t, problem string) bool { + emitter.error = yaml_WRITER_ERROR + emitter.problem = problem + return false +} + +// Flush the output buffer. +func yaml_emitter_flush(emitter *yaml_emitter_t) bool { + if emitter.write_handler == nil { + panic("write handler not set") + } + + // Check if the buffer is empty. + if emitter.buffer_pos == 0 { + return true + } + + if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil { + return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error()) + } + emitter.buffer_pos = 0 + return true +} diff --git a/vendor/go.yaml.in/yaml/v3/yaml.go b/vendor/go.yaml.in/yaml/v3/yaml.go new file mode 100644 index 0000000000..0b101cd20d --- /dev/null +++ b/vendor/go.yaml.in/yaml/v3/yaml.go @@ -0,0 +1,703 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package yaml implements YAML support for the Go language. 
+//
+// Source code and other details for the project are available at GitHub:
+//
+// https://github.com/yaml/go-yaml
+package yaml
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "reflect"
+ "strings"
+ "sync"
+ "unicode/utf8"
+)
+
+// The Unmarshaler interface may be implemented by types to customize their
+// behavior when being unmarshaled from a YAML document.
+type Unmarshaler interface {
+ UnmarshalYAML(value *Node) error
+}
+
+type obsoleteUnmarshaler interface {
+ UnmarshalYAML(unmarshal func(interface{}) error) error
+}
+
+// The Marshaler interface may be implemented by types to customize their
+// behavior when being marshaled into a YAML document. The returned value
+// is marshaled in place of the original value implementing Marshaler.
+//
+// If an error is returned by MarshalYAML, the marshaling procedure stops
+// and returns with the provided error.
+type Marshaler interface {
+ MarshalYAML() (interface{}, error)
+}
+
+// Unmarshal decodes the first document found within the in byte slice
+// and assigns decoded values into the out value.
+//
+// Maps and pointers (to a struct, string, int, etc) are accepted as out
+// values. If an internal pointer within a struct is not initialized,
+// the yaml package will initialize it if necessary for unmarshalling
+// the provided data. The out parameter must not be nil.
+//
+// The type of the decoded values should be compatible with the respective
+// values in out. If one or more values cannot be decoded due to type
+// mismatches, decoding continues partially until the end of the YAML
+// content, and a *yaml.TypeError is returned with details for all
+// missed values.
+//
+// Struct fields are only unmarshalled if they are exported (have an
+// upper case first letter), and are unmarshalled using the field name
+// lowercased as the default key. Custom keys may be defined via the
+// "yaml" name in the field tag: the content preceding the first comma
+// is used as the key, and the following comma-separated options are
+// used to tweak the marshalling process (see Marshal).
+// Conflicting names result in a runtime error.
+//
+// For example:
+//
+// type T struct {
+// F int `yaml:"a,omitempty"`
+// B int
+// }
+// var t T
+// yaml.Unmarshal([]byte("a: 1\nb: 2"), &t)
+//
+// See the documentation of Marshal for the format of tags and a list of
+// supported tag options.
+func Unmarshal(in []byte, out interface{}) (err error) {
+ return unmarshal(in, out, false)
+}
+
+// A Decoder reads and decodes YAML values from an input stream.
+type Decoder struct {
+ parser *parser
+ knownFields bool
+}
+
+// NewDecoder returns a new decoder that reads from r.
+//
+// The decoder introduces its own buffering and may read
+// data from r beyond the YAML values requested.
+func NewDecoder(r io.Reader) *Decoder {
+ return &Decoder{
+ parser: newParserFromReader(r),
+ }
+}
+
+// KnownFields ensures that the keys in decoded mappings
+// exist as fields in the struct being decoded into.
+func (dec *Decoder) KnownFields(enable bool) {
+ dec.knownFields = enable
+}
+
+// Decode reads the next YAML-encoded value from its input
+// and stores it in the value pointed to by v.
+//
+// See the documentation for Unmarshal for details about the
+// conversion of YAML into a Go value.
+func (dec *Decoder) Decode(v interface{}) (err error) {
+ d := newDecoder()
+ d.knownFields = dec.knownFields
+ defer handleErr(&err)
+ node := dec.parser.parse()
+ if node == nil {
+ return io.EOF
+ }
+ out := reflect.ValueOf(v)
+ if out.Kind() == reflect.Ptr && !out.IsNil() {
+ out = out.Elem()
+ }
+ d.unmarshal(node, out)
+ if len(d.terrors) > 0 {
+ return &TypeError{d.terrors}
+ }
+ return nil
+}
+
+// Decode decodes the node and stores its data into the value pointed to by v.
+//
+// See the documentation for Unmarshal for details about the
+// conversion of YAML into a Go value.
+func (n *Node) Decode(v interface{}) (err error) {
+ d := newDecoder()
+ defer handleErr(&err)
+ out := reflect.ValueOf(v)
+ if out.Kind() == reflect.Ptr && !out.IsNil() {
+ out = out.Elem()
+ }
+ d.unmarshal(n, out)
+ if len(d.terrors) > 0 {
+ return &TypeError{d.terrors}
+ }
+ return nil
+}
+
+func unmarshal(in []byte, out interface{}, strict bool) (err error) {
+ defer handleErr(&err)
+ d := newDecoder()
+ p := newParser(in)
+ defer p.destroy()
+ node := p.parse()
+ if node != nil {
+ v := reflect.ValueOf(out)
+ if v.Kind() == reflect.Ptr && !v.IsNil() {
+ v = v.Elem()
+ }
+ d.unmarshal(node, v)
+ }
+ if len(d.terrors) > 0 {
+ return &TypeError{d.terrors}
+ }
+ return nil
+}
+
+// Marshal serializes the value provided into a YAML document. The structure
+// of the generated document will reflect the structure of the value itself.
+// Maps and pointers (to struct, string, int, etc) are accepted as the in value.
+//
+// Struct fields are only marshalled if they are exported (have an upper case
+// first letter), and are marshalled using the field name lowercased as the
+// default key. Custom keys may be defined via the "yaml" name in the field
+// tag: the content preceding the first comma is used as the key, and the
+// following comma-separated options are used to tweak the marshalling process.
+// Conflicting names result in a runtime error.
+//
+// The field tag format accepted is:
+//
+// `(...) yaml:"[<key>][,<flag1>[,<flag2>]]" (...)`
+//
+// The following flags are currently supported:
+//
+// omitempty Only include the field if it's not set to the zero
+// value for the type or to empty slices or maps.
+// Zero valued structs will be omitted if all their public
+// fields are zero, unless they implement an IsZero
+// method (see the IsZeroer interface type), in which
+// case the field will be excluded if IsZero returns true.
+//
+// flow Marshal using a flow style (useful for structs,
+// sequences and maps).
+//
+// inline Inline the field, which must be a struct or a map,
+// causing all of its fields or keys to be processed as if
+// they were part of the outer struct. For maps, keys must
+// not conflict with the yaml keys of other struct fields.
+//
+// In addition, if the key is "-", the field is ignored.
+//
+// For example:
+//
+// type T struct {
+// F int `yaml:"a,omitempty"`
+// B int
+// }
+// yaml.Marshal(&T{B: 2}) // Returns "b: 2\n"
+// yaml.Marshal(&T{F: 1}) // Returns "a: 1\nb: 0\n"
+func Marshal(in interface{}) (out []byte, err error) {
+ defer handleErr(&err)
+ e := newEncoder()
+ defer e.destroy()
+ e.marshalDoc("", reflect.ValueOf(in))
+ e.finish()
+ out = e.out
+ return
+}
+
+// An Encoder writes YAML values to an output stream.
+type Encoder struct {
+ encoder *encoder
+}
+
+// NewEncoder returns a new encoder that writes to w.
+// The Encoder should be closed after use to flush all data
+// to w.
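A brief usage sketch of the Decoder and KnownFields defined above: with strict field checking enabled, an unknown mapping key is reported as a *yaml.TypeError while the known fields still decode.

package main

import (
	"bytes"
	"fmt"

	"go.yaml.in/yaml/v3"
)

type Config struct {
	Name string `yaml:"name"`
}

func main() {
	dec := yaml.NewDecoder(bytes.NewReader([]byte("name: demo\ntypo: oops\n")))
	dec.KnownFields(true) // unknown mapping keys now produce an error
	var c Config
	err := dec.Decode(&c)
	fmt.Println(c.Name, err != nil) // demo true
}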
+func NewEncoder(w io.Writer) *Encoder {
+ return &Encoder{
+ encoder: newEncoderWithWriter(w),
+ }
+}
+
+// Encode writes the YAML encoding of v to the stream.
+// If multiple items are encoded to the stream, the
+// second and subsequent documents will be preceded
+// with a "---" document separator, but the first will not.
+//
+// See the documentation for Marshal for details about the conversion of Go
+// values to YAML.
+func (e *Encoder) Encode(v interface{}) (err error) {
+ defer handleErr(&err)
+ e.encoder.marshalDoc("", reflect.ValueOf(v))
+ return nil
+}
+
+// Encode encodes value v and stores its representation in n.
+//
+// See the documentation for Marshal for details about the
+// conversion of Go values into YAML.
+func (n *Node) Encode(v interface{}) (err error) {
+ defer handleErr(&err)
+ e := newEncoder()
+ defer e.destroy()
+ e.marshalDoc("", reflect.ValueOf(v))
+ e.finish()
+ p := newParser(e.out)
+ p.textless = true
+ defer p.destroy()
+ doc := p.parse()
+ *n = *doc.Content[0]
+ return nil
+}
+
+// SetIndent changes the indentation used when encoding.
+func (e *Encoder) SetIndent(spaces int) {
+ if spaces < 0 {
+ panic("yaml: cannot indent to a negative number of spaces")
+ }
+ e.encoder.indent = spaces
+}
+
+// CompactSeqIndent makes it so that '- ' is considered part of the indentation.
+func (e *Encoder) CompactSeqIndent() {
+ e.encoder.emitter.compact_sequence_indent = true
+}
+
+// DefaultSeqIndent makes it so that '- ' is not considered part of the indentation.
+func (e *Encoder) DefaultSeqIndent() {
+ e.encoder.emitter.compact_sequence_indent = false
+}
+
+// Close closes the encoder by writing any remaining data.
+// It does not write a stream terminating string "...".
+func (e *Encoder) Close() (err error) {
+ defer handleErr(&err)
+ e.encoder.finish()
+ return nil
+}
+
+func handleErr(err *error) {
+ if v := recover(); v != nil {
+ if e, ok := v.(yamlError); ok {
+ *err = e.err
+ } else {
+ panic(v)
+ }
+ }
+}
+
+type yamlError struct {
+ err error
+}
+
+func fail(err error) {
+ panic(yamlError{err})
+}
+
+func failf(format string, args ...interface{}) {
+ panic(yamlError{fmt.Errorf("yaml: "+format, args...)})
+}
+
+// A TypeError is returned by Unmarshal when one or more fields in
+// the YAML document cannot be properly decoded into the requested
+// types. When this error is returned, the value is still
+// unmarshaled partially.
+type TypeError struct {
+ Errors []string
+}
+
+func (e *TypeError) Error() string {
+ return fmt.Sprintf("yaml: unmarshal errors:\n %s", strings.Join(e.Errors, "\n "))
+}
+
+type Kind uint32
+
+const (
+ DocumentNode Kind = 1 << iota
+ SequenceNode
+ MappingNode
+ ScalarNode
+ AliasNode
+)
+
+type Style uint32
+
+const (
+ TaggedStyle Style = 1 << iota
+ DoubleQuotedStyle
+ SingleQuotedStyle
+ LiteralStyle
+ FoldedStyle
+ FlowStyle
+)
+
+// Node represents an element in the YAML document hierarchy. While documents
+// are typically encoded and decoded into higher level types, such as structs
+// and maps, Node is an intermediate representation that allows detailed
+// control over the content being decoded or encoded.
+//
+// It's worth noting that although Node offers access into details such as
+// line numbers, columns, and comments, the content when re-encoded will not
+// have its original textual representation preserved. An effort is made to
+// render the data pleasantly, and to preserve comments near the data they
+// describe, though.
+//
+// Values that make use of the Node type interact with the yaml package in the
+// same way any other type would do, by encoding and decoding yaml data
+// directly or indirectly into them.
+//
+// For example:
+//
+// var person struct {
+// Name string
+// Address yaml.Node
+// }
+// err := yaml.Unmarshal(data, &person)
+//
+// Or by itself:
+//
+// var person Node
+// err := yaml.Unmarshal(data, &person)
+type Node struct {
+ // Kind defines whether the node is a document, a mapping, a sequence,
+ // a scalar value, or an alias to another node. The specific data type of
+ // scalar nodes may be obtained via the ShortTag and LongTag methods.
+ Kind Kind
+
+ // Style allows customizing the appearance of the node in the tree.
+ Style Style
+
+ // Tag holds the YAML tag defining the data type for the value.
+ // When decoding, this field will always be set to the resolved tag,
+ // even when it wasn't explicitly provided in the YAML content.
+ // When encoding, if this field is unset the value type will be
+ // implied from the node properties, and if it is set, it will only
+ // be serialized into the representation if TaggedStyle is used or
+ // the implicit tag diverges from the provided one.
+ Tag string
+
+ // Value holds the unescaped and unquoted representation of the value.
+ Value string
+
+ // Anchor holds the anchor name for this node, which allows aliases to point to it.
+ Anchor string
+
+ // Alias holds the node that this alias points to. Only valid when Kind is AliasNode.
+ Alias *Node
+
+ // Content holds contained nodes for documents, mappings, and sequences.
+ Content []*Node
+
+ // HeadComment holds any comments in the lines preceding the node and
+ // not separated by an empty line.
+ HeadComment string
+
+ // LineComment holds any comments at the end of the line the node is in.
+ LineComment string
+
+ // FootComment holds any comments following the node and before empty lines.
+ FootComment string
+
+ // Line and Column hold the node position in the decoded YAML text.
+ // These fields are not respected when encoding the node.
+ Line int
+ Column int
+}
+
+// IsZero returns whether the node has all of its fields unset.
+func (n *Node) IsZero() bool {
+ return n.Kind == 0 && n.Style == 0 && n.Tag == "" && n.Value == "" && n.Anchor == "" && n.Alias == nil && n.Content == nil &&
+ n.HeadComment == "" && n.LineComment == "" && n.FootComment == "" && n.Line == 0 && n.Column == 0
+}
+
+// LongTag returns the long form of the tag that indicates the data type for
+// the node. If the Tag field isn't explicitly defined, one will be computed
+// based on the node properties.
+func (n *Node) LongTag() string {
+ return longTag(n.ShortTag())
+}
+
+// ShortTag returns the short form of the YAML tag that indicates data type for
+// the node. If the Tag field isn't explicitly defined, one will be computed
+// based on the node properties.
+func (n *Node) ShortTag() string {
+ if n.indicatedString() {
+ return strTag
+ }
+ if n.Tag == "" || n.Tag == "!" {
+ switch n.Kind {
+ case MappingNode:
+ return mapTag
+ case SequenceNode:
+ return seqTag
+ case AliasNode:
+ if n.Alias != nil {
+ return n.Alias.ShortTag()
+ }
+ case ScalarNode:
+ tag, _ := resolve("", n.Value)
+ return tag
+ case 0:
+ // Special case to make the zero value convenient.
+ if n.IsZero() { + return nullTag + } + } + return "" + } + return shortTag(n.Tag) +} + +func (n *Node) indicatedString() bool { + return n.Kind == ScalarNode && + (shortTag(n.Tag) == strTag || + (n.Tag == "" || n.Tag == "!") && n.Style&(SingleQuotedStyle|DoubleQuotedStyle|LiteralStyle|FoldedStyle) != 0) +} + +// SetString is a convenience function that sets the node to a string value +// and defines its style in a pleasant way depending on its content. +func (n *Node) SetString(s string) { + n.Kind = ScalarNode + if utf8.ValidString(s) { + n.Value = s + n.Tag = strTag + } else { + n.Value = encodeBase64(s) + n.Tag = binaryTag + } + if strings.Contains(n.Value, "\n") { + n.Style = LiteralStyle + } +} + +// -------------------------------------------------------------------------- +// Maintain a mapping of keys to structure field indexes + +// The code in this section was copied from mgo/bson. + +// structInfo holds details for the serialization of fields of +// a given struct. +type structInfo struct { + FieldsMap map[string]fieldInfo + FieldsList []fieldInfo + + // InlineMap is the number of the field in the struct that + // contains an ,inline map, or -1 if there's none. + InlineMap int + + // InlineUnmarshalers holds indexes to inlined fields that + // contain unmarshaler values. + InlineUnmarshalers [][]int +} + +type fieldInfo struct { + Key string + Num int + OmitEmpty bool + Flow bool + // Id holds the unique field identifier, so we can cheaply + // check for field duplicates without maintaining an extra map. + Id int + + // Inline holds the field index if the field is part of an inlined struct. + Inline []int +} + +var structMap = make(map[reflect.Type]*structInfo) +var fieldMapMutex sync.RWMutex +var unmarshalerType reflect.Type + +func init() { + var v Unmarshaler + unmarshalerType = reflect.ValueOf(&v).Elem().Type() +} + +func getStructInfo(st reflect.Type) (*structInfo, error) { + fieldMapMutex.RLock() + sinfo, found := structMap[st] + fieldMapMutex.RUnlock() + if found { + return sinfo, nil + } + + n := st.NumField() + fieldsMap := make(map[string]fieldInfo) + fieldsList := make([]fieldInfo, 0, n) + inlineMap := -1 + inlineUnmarshalers := [][]int(nil) + for i := 0; i != n; i++ { + field := st.Field(i) + if field.PkgPath != "" && !field.Anonymous { + continue // Private field + } + + info := fieldInfo{Num: i} + + tag := field.Tag.Get("yaml") + if tag == "" && strings.Index(string(field.Tag), ":") < 0 { + tag = string(field.Tag) + } + if tag == "-" { + continue + } + + inline := false + fields := strings.Split(tag, ",") + if len(fields) > 1 { + for _, flag := range fields[1:] { + switch flag { + case "omitempty": + info.OmitEmpty = true + case "flow": + info.Flow = true + case "inline": + inline = true + default: + return nil, errors.New(fmt.Sprintf("unsupported flag %q in tag %q of type %s", flag, tag, st)) + } + } + tag = fields[0] + } + + if inline { + switch field.Type.Kind() { + case reflect.Map: + if inlineMap >= 0 { + return nil, errors.New("multiple ,inline maps in struct " + st.String()) + } + if field.Type.Key() != reflect.TypeOf("") { + return nil, errors.New("option ,inline needs a map with string keys in struct " + st.String()) + } + inlineMap = info.Num + case reflect.Struct, reflect.Ptr: + ftype := field.Type + for ftype.Kind() == reflect.Ptr { + ftype = ftype.Elem() + } + if ftype.Kind() != reflect.Struct { + return nil, errors.New("option ,inline may only be used on a struct or map field") + } + if reflect.PtrTo(ftype).Implements(unmarshalerType) { + 
inlineUnmarshalers = append(inlineUnmarshalers, []int{i}) + } else { + sinfo, err := getStructInfo(ftype) + if err != nil { + return nil, err + } + for _, index := range sinfo.InlineUnmarshalers { + inlineUnmarshalers = append(inlineUnmarshalers, append([]int{i}, index...)) + } + for _, finfo := range sinfo.FieldsList { + if _, found := fieldsMap[finfo.Key]; found { + msg := "duplicated key '" + finfo.Key + "' in struct " + st.String() + return nil, errors.New(msg) + } + if finfo.Inline == nil { + finfo.Inline = []int{i, finfo.Num} + } else { + finfo.Inline = append([]int{i}, finfo.Inline...) + } + finfo.Id = len(fieldsList) + fieldsMap[finfo.Key] = finfo + fieldsList = append(fieldsList, finfo) + } + } + default: + return nil, errors.New("option ,inline may only be used on a struct or map field") + } + continue + } + + if tag != "" { + info.Key = tag + } else { + info.Key = strings.ToLower(field.Name) + } + + if _, found = fieldsMap[info.Key]; found { + msg := "duplicated key '" + info.Key + "' in struct " + st.String() + return nil, errors.New(msg) + } + + info.Id = len(fieldsList) + fieldsList = append(fieldsList, info) + fieldsMap[info.Key] = info + } + + sinfo = &structInfo{ + FieldsMap: fieldsMap, + FieldsList: fieldsList, + InlineMap: inlineMap, + InlineUnmarshalers: inlineUnmarshalers, + } + + fieldMapMutex.Lock() + structMap[st] = sinfo + fieldMapMutex.Unlock() + return sinfo, nil +} + +// IsZeroer is used to check whether an object is zero to +// determine whether it should be omitted when marshaling +// with the omitempty flag. One notable implementation +// is time.Time. +type IsZeroer interface { + IsZero() bool +} + +func isZero(v reflect.Value) bool { + kind := v.Kind() + if z, ok := v.Interface().(IsZeroer); ok { + if (kind == reflect.Ptr || kind == reflect.Interface) && v.IsNil() { + return true + } + return z.IsZero() + } + switch kind { + case reflect.String: + return len(v.String()) == 0 + case reflect.Interface, reflect.Ptr: + return v.IsNil() + case reflect.Slice: + return v.Len() == 0 + case reflect.Map: + return v.Len() == 0 + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Struct: + vt := v.Type() + for i := v.NumField() - 1; i >= 0; i-- { + if vt.Field(i).PkgPath != "" { + continue // Private field + } + if !isZero(v.Field(i)) { + return false + } + } + return true + } + return false +} diff --git a/vendor/go.yaml.in/yaml/v3/yamlh.go b/vendor/go.yaml.in/yaml/v3/yamlh.go new file mode 100644 index 0000000000..f59aa40f64 --- /dev/null +++ b/vendor/go.yaml.in/yaml/v3/yamlh.go @@ -0,0 +1,811 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// Copyright (c) 2006-2010 Kirill Simonov +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +// of the Software, and to permit persons to whom the Software is furnished to do +// so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. 
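To tie the tag parsing and zero-value checks above together, a usage sketch of the omitempty and inline options; the expected output follows those rules:

package main

import (
	"fmt"

	"go.yaml.in/yaml/v3"
)

type Meta struct {
	Owner string `yaml:"owner"`
}

type Doc struct {
	Title string `yaml:"title"`
	Count int    `yaml:"count,omitempty"` // dropped while zero (see isZero above)
	Meta  Meta   `yaml:",inline"`         // fields hoisted into the outer mapping
}

func main() {
	out, err := yaml.Marshal(Doc{Title: "t", Meta: Meta{Owner: "me"}})
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out))
	// Expected:
	// title: t
	// owner: me
}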
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +package yaml + +import ( + "fmt" + "io" +) + +// The version directive data. +type yaml_version_directive_t struct { + major int8 // The major version number. + minor int8 // The minor version number. +} + +// The tag directive data. +type yaml_tag_directive_t struct { + handle []byte // The tag handle. + prefix []byte // The tag prefix. +} + +type yaml_encoding_t int + +// The stream encoding. +const ( + // Let the parser choose the encoding. + yaml_ANY_ENCODING yaml_encoding_t = iota + + yaml_UTF8_ENCODING // The default UTF-8 encoding. + yaml_UTF16LE_ENCODING // The UTF-16-LE encoding with BOM. + yaml_UTF16BE_ENCODING // The UTF-16-BE encoding with BOM. +) + +type yaml_break_t int + +// Line break types. +const ( + // Let the parser choose the break type. + yaml_ANY_BREAK yaml_break_t = iota + + yaml_CR_BREAK // Use CR for line breaks (Mac style). + yaml_LN_BREAK // Use LN for line breaks (Unix style). + yaml_CRLN_BREAK // Use CR LN for line breaks (DOS style). +) + +type yaml_error_type_t int + +// Many bad things could happen with the parser and emitter. +const ( + // No error is produced. + yaml_NO_ERROR yaml_error_type_t = iota + + yaml_MEMORY_ERROR // Cannot allocate or reallocate a block of memory. + yaml_READER_ERROR // Cannot read or decode the input stream. + yaml_SCANNER_ERROR // Cannot scan the input stream. + yaml_PARSER_ERROR // Cannot parse the input stream. + yaml_COMPOSER_ERROR // Cannot compose a YAML document. + yaml_WRITER_ERROR // Cannot write to the output stream. + yaml_EMITTER_ERROR // Cannot emit a YAML stream. +) + +// The pointer position. +type yaml_mark_t struct { + index int // The position index. + line int // The position line. + column int // The position column. +} + +// Node Styles + +type yaml_style_t int8 + +type yaml_scalar_style_t yaml_style_t + +// Scalar styles. +const ( + // Let the emitter choose the style. + yaml_ANY_SCALAR_STYLE yaml_scalar_style_t = 0 + + yaml_PLAIN_SCALAR_STYLE yaml_scalar_style_t = 1 << iota // The plain scalar style. + yaml_SINGLE_QUOTED_SCALAR_STYLE // The single-quoted scalar style. + yaml_DOUBLE_QUOTED_SCALAR_STYLE // The double-quoted scalar style. + yaml_LITERAL_SCALAR_STYLE // The literal scalar style. + yaml_FOLDED_SCALAR_STYLE // The folded scalar style. +) + +type yaml_sequence_style_t yaml_style_t + +// Sequence styles. +const ( + // Let the emitter choose the style. + yaml_ANY_SEQUENCE_STYLE yaml_sequence_style_t = iota + + yaml_BLOCK_SEQUENCE_STYLE // The block sequence style. + yaml_FLOW_SEQUENCE_STYLE // The flow sequence style. +) + +type yaml_mapping_style_t yaml_style_t + +// Mapping styles. +const ( + // Let the emitter choose the style. + yaml_ANY_MAPPING_STYLE yaml_mapping_style_t = iota + + yaml_BLOCK_MAPPING_STYLE // The block mapping style. + yaml_FLOW_MAPPING_STYLE // The flow mapping style. +) + +// Tokens + +type yaml_token_type_t int + +// Token types. +const ( + // An empty token. + yaml_NO_TOKEN yaml_token_type_t = iota + + yaml_STREAM_START_TOKEN // A STREAM-START token. 
+ yaml_STREAM_END_TOKEN // A STREAM-END token.
+
+ yaml_VERSION_DIRECTIVE_TOKEN // A VERSION-DIRECTIVE token.
+ yaml_TAG_DIRECTIVE_TOKEN // A TAG-DIRECTIVE token.
+ yaml_DOCUMENT_START_TOKEN // A DOCUMENT-START token.
+ yaml_DOCUMENT_END_TOKEN // A DOCUMENT-END token.
+
+ yaml_BLOCK_SEQUENCE_START_TOKEN // A BLOCK-SEQUENCE-START token.
+ yaml_BLOCK_MAPPING_START_TOKEN // A BLOCK-MAPPING-START token.
+ yaml_BLOCK_END_TOKEN // A BLOCK-END token.
+
+ yaml_FLOW_SEQUENCE_START_TOKEN // A FLOW-SEQUENCE-START token.
+ yaml_FLOW_SEQUENCE_END_TOKEN // A FLOW-SEQUENCE-END token.
+ yaml_FLOW_MAPPING_START_TOKEN // A FLOW-MAPPING-START token.
+ yaml_FLOW_MAPPING_END_TOKEN // A FLOW-MAPPING-END token.
+
+ yaml_BLOCK_ENTRY_TOKEN // A BLOCK-ENTRY token.
+ yaml_FLOW_ENTRY_TOKEN // A FLOW-ENTRY token.
+ yaml_KEY_TOKEN // A KEY token.
+ yaml_VALUE_TOKEN // A VALUE token.
+
+ yaml_ALIAS_TOKEN // An ALIAS token.
+ yaml_ANCHOR_TOKEN // An ANCHOR token.
+ yaml_TAG_TOKEN // A TAG token.
+ yaml_SCALAR_TOKEN // A SCALAR token.
+)
+
+func (tt yaml_token_type_t) String() string {
+ switch tt {
+ case yaml_NO_TOKEN:
+ return "yaml_NO_TOKEN"
+ case yaml_STREAM_START_TOKEN:
+ return "yaml_STREAM_START_TOKEN"
+ case yaml_STREAM_END_TOKEN:
+ return "yaml_STREAM_END_TOKEN"
+ case yaml_VERSION_DIRECTIVE_TOKEN:
+ return "yaml_VERSION_DIRECTIVE_TOKEN"
+ case yaml_TAG_DIRECTIVE_TOKEN:
+ return "yaml_TAG_DIRECTIVE_TOKEN"
+ case yaml_DOCUMENT_START_TOKEN:
+ return "yaml_DOCUMENT_START_TOKEN"
+ case yaml_DOCUMENT_END_TOKEN:
+ return "yaml_DOCUMENT_END_TOKEN"
+ case yaml_BLOCK_SEQUENCE_START_TOKEN:
+ return "yaml_BLOCK_SEQUENCE_START_TOKEN"
+ case yaml_BLOCK_MAPPING_START_TOKEN:
+ return "yaml_BLOCK_MAPPING_START_TOKEN"
+ case yaml_BLOCK_END_TOKEN:
+ return "yaml_BLOCK_END_TOKEN"
+ case yaml_FLOW_SEQUENCE_START_TOKEN:
+ return "yaml_FLOW_SEQUENCE_START_TOKEN"
+ case yaml_FLOW_SEQUENCE_END_TOKEN:
+ return "yaml_FLOW_SEQUENCE_END_TOKEN"
+ case yaml_FLOW_MAPPING_START_TOKEN:
+ return "yaml_FLOW_MAPPING_START_TOKEN"
+ case yaml_FLOW_MAPPING_END_TOKEN:
+ return "yaml_FLOW_MAPPING_END_TOKEN"
+ case yaml_BLOCK_ENTRY_TOKEN:
+ return "yaml_BLOCK_ENTRY_TOKEN"
+ case yaml_FLOW_ENTRY_TOKEN:
+ return "yaml_FLOW_ENTRY_TOKEN"
+ case yaml_KEY_TOKEN:
+ return "yaml_KEY_TOKEN"
+ case yaml_VALUE_TOKEN:
+ return "yaml_VALUE_TOKEN"
+ case yaml_ALIAS_TOKEN:
+ return "yaml_ALIAS_TOKEN"
+ case yaml_ANCHOR_TOKEN:
+ return "yaml_ANCHOR_TOKEN"
+ case yaml_TAG_TOKEN:
+ return "yaml_TAG_TOKEN"
+ case yaml_SCALAR_TOKEN:
+ return "yaml_SCALAR_TOKEN"
+ }
+ return ""
+}
+
+// The token structure.
+type yaml_token_t struct {
+ // The token type.
+ typ yaml_token_type_t
+
+ // The start/end of the token.
+ start_mark, end_mark yaml_mark_t
+
+ // The stream encoding (for yaml_STREAM_START_TOKEN).
+ encoding yaml_encoding_t
+
+ // The alias/anchor/scalar value or tag/tag directive handle
+ // (for yaml_ALIAS_TOKEN, yaml_ANCHOR_TOKEN, yaml_SCALAR_TOKEN, yaml_TAG_TOKEN, yaml_TAG_DIRECTIVE_TOKEN).
+ value []byte
+
+ // The tag suffix (for yaml_TAG_TOKEN).
+ suffix []byte
+
+ // The tag directive prefix (for yaml_TAG_DIRECTIVE_TOKEN).
+ prefix []byte
+
+ // The scalar style (for yaml_SCALAR_TOKEN).
+ style yaml_scalar_style_t
+
+ // The version directive major/minor (for yaml_VERSION_DIRECTIVE_TOKEN).
+ major, minor int8
+}
+
+// Events
+
+type yaml_event_type_t int8
+
+// Event types.
+const (
+ // An empty event.
+ yaml_NO_EVENT yaml_event_type_t = iota
+
+ yaml_STREAM_START_EVENT // A STREAM-START event.
+ yaml_STREAM_END_EVENT // A STREAM-END event.
+ yaml_DOCUMENT_START_EVENT // A DOCUMENT-START event. + yaml_DOCUMENT_END_EVENT // A DOCUMENT-END event. + yaml_ALIAS_EVENT // An ALIAS event. + yaml_SCALAR_EVENT // A SCALAR event. + yaml_SEQUENCE_START_EVENT // A SEQUENCE-START event. + yaml_SEQUENCE_END_EVENT // A SEQUENCE-END event. + yaml_MAPPING_START_EVENT // A MAPPING-START event. + yaml_MAPPING_END_EVENT // A MAPPING-END event. + yaml_TAIL_COMMENT_EVENT +) + +var eventStrings = []string{ + yaml_NO_EVENT: "none", + yaml_STREAM_START_EVENT: "stream start", + yaml_STREAM_END_EVENT: "stream end", + yaml_DOCUMENT_START_EVENT: "document start", + yaml_DOCUMENT_END_EVENT: "document end", + yaml_ALIAS_EVENT: "alias", + yaml_SCALAR_EVENT: "scalar", + yaml_SEQUENCE_START_EVENT: "sequence start", + yaml_SEQUENCE_END_EVENT: "sequence end", + yaml_MAPPING_START_EVENT: "mapping start", + yaml_MAPPING_END_EVENT: "mapping end", + yaml_TAIL_COMMENT_EVENT: "tail comment", +} + +func (e yaml_event_type_t) String() string { + if e < 0 || int(e) >= len(eventStrings) { + return fmt.Sprintf("unknown event %d", e) + } + return eventStrings[e] +} + +// The event structure. +type yaml_event_t struct { + + // The event type. + typ yaml_event_type_t + + // The start and end of the event. + start_mark, end_mark yaml_mark_t + + // The document encoding (for yaml_STREAM_START_EVENT). + encoding yaml_encoding_t + + // The version directive (for yaml_DOCUMENT_START_EVENT). + version_directive *yaml_version_directive_t + + // The list of tag directives (for yaml_DOCUMENT_START_EVENT). + tag_directives []yaml_tag_directive_t + + // The comments + head_comment []byte + line_comment []byte + foot_comment []byte + tail_comment []byte + + // The anchor (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_ALIAS_EVENT). + anchor []byte + + // The tag (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). + tag []byte + + // The scalar value (for yaml_SCALAR_EVENT). + value []byte + + // Is the document start/end indicator implicit, or the tag optional? + // (for yaml_DOCUMENT_START_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_SCALAR_EVENT). + implicit bool + + // Is the tag optional for any non-plain style? (for yaml_SCALAR_EVENT). + quoted_implicit bool + + // The style (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). + style yaml_style_t +} + +func (e *yaml_event_t) scalar_style() yaml_scalar_style_t { return yaml_scalar_style_t(e.style) } +func (e *yaml_event_t) sequence_style() yaml_sequence_style_t { return yaml_sequence_style_t(e.style) } +func (e *yaml_event_t) mapping_style() yaml_mapping_style_t { return yaml_mapping_style_t(e.style) } + +// Nodes + +const ( + yaml_NULL_TAG = "tag:yaml.org,2002:null" // The tag !!null with the only possible value: null. + yaml_BOOL_TAG = "tag:yaml.org,2002:bool" // The tag !!bool with the values: true and false. + yaml_STR_TAG = "tag:yaml.org,2002:str" // The tag !!str for string values. + yaml_INT_TAG = "tag:yaml.org,2002:int" // The tag !!int for integer values. + yaml_FLOAT_TAG = "tag:yaml.org,2002:float" // The tag !!float for float values. + yaml_TIMESTAMP_TAG = "tag:yaml.org,2002:timestamp" // The tag !!timestamp for date and time values. + + yaml_SEQ_TAG = "tag:yaml.org,2002:seq" // The tag !!seq is used to denote sequences. + yaml_MAP_TAG = "tag:yaml.org,2002:map" // The tag !!map is used to denote mapping. + + // Not in original libyaml. 
+ yaml_BINARY_TAG = "tag:yaml.org,2002:binary" + yaml_MERGE_TAG = "tag:yaml.org,2002:merge" + + yaml_DEFAULT_SCALAR_TAG = yaml_STR_TAG // The default scalar tag is !!str. + yaml_DEFAULT_SEQUENCE_TAG = yaml_SEQ_TAG // The default sequence tag is !!seq. + yaml_DEFAULT_MAPPING_TAG = yaml_MAP_TAG // The default mapping tag is !!map. +) + +type yaml_node_type_t int + +// Node types. +const ( + // An empty node. + yaml_NO_NODE yaml_node_type_t = iota + + yaml_SCALAR_NODE // A scalar node. + yaml_SEQUENCE_NODE // A sequence node. + yaml_MAPPING_NODE // A mapping node. +) + +// An element of a sequence node. +type yaml_node_item_t int + +// An element of a mapping node. +type yaml_node_pair_t struct { + key int // The key of the element. + value int // The value of the element. +} + +// The node structure. +type yaml_node_t struct { + typ yaml_node_type_t // The node type. + tag []byte // The node tag. + + // The node data. + + // The scalar parameters (for yaml_SCALAR_NODE). + scalar struct { + value []byte // The scalar value. + length int // The length of the scalar value. + style yaml_scalar_style_t // The scalar style. + } + + // The sequence parameters (for YAML_SEQUENCE_NODE). + sequence struct { + items_data []yaml_node_item_t // The stack of sequence items. + style yaml_sequence_style_t // The sequence style. + } + + // The mapping parameters (for yaml_MAPPING_NODE). + mapping struct { + pairs_data []yaml_node_pair_t // The stack of mapping pairs (key, value). + pairs_start *yaml_node_pair_t // The beginning of the stack. + pairs_end *yaml_node_pair_t // The end of the stack. + pairs_top *yaml_node_pair_t // The top of the stack. + style yaml_mapping_style_t // The mapping style. + } + + start_mark yaml_mark_t // The beginning of the node. + end_mark yaml_mark_t // The end of the node. + +} + +// The document structure. +type yaml_document_t struct { + + // The document nodes. + nodes []yaml_node_t + + // The version directive. + version_directive *yaml_version_directive_t + + // The list of tag directives. + tag_directives_data []yaml_tag_directive_t + tag_directives_start int // The beginning of the tag directives list. + tag_directives_end int // The end of the tag directives list. + + start_implicit int // Is the document start indicator implicit? + end_implicit int // Is the document end indicator implicit? + + // The start/end of the document. + start_mark, end_mark yaml_mark_t +} + +// The prototype of a read handler. +// +// The read handler is called when the parser needs to read more bytes from the +// source. The handler should write not more than size bytes to the buffer. +// The number of written bytes should be set to the size_read variable. +// +// [in,out] data A pointer to an application data specified by +// +// yaml_parser_set_input(). +// +// [out] buffer The buffer to write the data from the source. +// [in] size The size of the buffer. +// [out] size_read The actual number of bytes read from the source. +// +// On success, the handler should return 1. If the handler failed, +// the returned value should be 0. On EOF, the handler should set the +// size_read to 0 and return 1. +type yaml_read_handler_t func(parser *yaml_parser_t, buffer []byte) (n int, err error) + +// This structure holds information about a potential simple key. +type yaml_simple_key_t struct { + possible bool // Is a simple key possible? + required bool // Is a simple key required? + token_number int // The number of the token. + mark yaml_mark_t // The position mark. 
+}
+
+// The states of the parser.
+type yaml_parser_state_t int
+
+const (
+ yaml_PARSE_STREAM_START_STATE yaml_parser_state_t = iota
+
+ yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE // Expect the beginning of an implicit document.
+ yaml_PARSE_DOCUMENT_START_STATE // Expect DOCUMENT-START.
+ yaml_PARSE_DOCUMENT_CONTENT_STATE // Expect the content of a document.
+ yaml_PARSE_DOCUMENT_END_STATE // Expect DOCUMENT-END.
+ yaml_PARSE_BLOCK_NODE_STATE // Expect a block node.
+ yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE // Expect a block node or indentless sequence.
+ yaml_PARSE_FLOW_NODE_STATE // Expect a flow node.
+ yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a block sequence.
+ yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE // Expect an entry of a block sequence.
+ yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE // Expect an entry of an indentless sequence.
+ yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping.
+ yaml_PARSE_BLOCK_MAPPING_KEY_STATE // Expect a block mapping key.
+ yaml_PARSE_BLOCK_MAPPING_VALUE_STATE // Expect a block mapping value.
+ yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a flow sequence.
+ yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE // Expect an entry of a flow sequence.
+ yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE // Expect a key of an ordered mapping.
+ yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE // Expect a value of an ordered mapping.
+ yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE // Expect the end of an ordered mapping entry.
+ yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping.
+ yaml_PARSE_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping.
+ yaml_PARSE_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping.
+ yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE // Expect an empty value of a flow mapping.
+ yaml_PARSE_END_STATE // Expect nothing.
+) + +func (ps yaml_parser_state_t) String() string { + switch ps { + case yaml_PARSE_STREAM_START_STATE: + return "yaml_PARSE_STREAM_START_STATE" + case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE: + return "yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE" + case yaml_PARSE_DOCUMENT_START_STATE: + return "yaml_PARSE_DOCUMENT_START_STATE" + case yaml_PARSE_DOCUMENT_CONTENT_STATE: + return "yaml_PARSE_DOCUMENT_CONTENT_STATE" + case yaml_PARSE_DOCUMENT_END_STATE: + return "yaml_PARSE_DOCUMENT_END_STATE" + case yaml_PARSE_BLOCK_NODE_STATE: + return "yaml_PARSE_BLOCK_NODE_STATE" + case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE: + return "yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE" + case yaml_PARSE_FLOW_NODE_STATE: + return "yaml_PARSE_FLOW_NODE_STATE" + case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE: + return "yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE" + case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE: + return "yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE" + case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE: + return "yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE" + case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE: + return "yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE" + case yaml_PARSE_BLOCK_MAPPING_KEY_STATE: + return "yaml_PARSE_BLOCK_MAPPING_KEY_STATE" + case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE: + return "yaml_PARSE_BLOCK_MAPPING_VALUE_STATE" + case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE" + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE" + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE" + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE" + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE" + case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE: + return "yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE" + case yaml_PARSE_FLOW_MAPPING_KEY_STATE: + return "yaml_PARSE_FLOW_MAPPING_KEY_STATE" + case yaml_PARSE_FLOW_MAPPING_VALUE_STATE: + return "yaml_PARSE_FLOW_MAPPING_VALUE_STATE" + case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE: + return "yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE" + case yaml_PARSE_END_STATE: + return "yaml_PARSE_END_STATE" + } + return "" +} + +// This structure holds aliases data. +type yaml_alias_data_t struct { + anchor []byte // The anchor. + index int // The node id. + mark yaml_mark_t // The anchor mark. +} + +// The parser structure. +// +// All members are internal. Manage the structure using the +// yaml_parser_ family of functions. +type yaml_parser_t struct { + + // Error handling + + error yaml_error_type_t // Error type. + + problem string // Error description. + + // The byte about which the problem occurred. + problem_offset int + problem_value int + problem_mark yaml_mark_t + + // The error context. + context string + context_mark yaml_mark_t + + // Reader stuff + + read_handler yaml_read_handler_t // Read handler. + + input_reader io.Reader // File input data. + input []byte // String input data. + input_pos int + + eof bool // EOF flag + + buffer []byte // The working buffer. + buffer_pos int // The current position of the buffer. + + unread int // The number of unread characters in the buffer. + + newlines int // The number of line breaks since last non-break/non-blank character + + raw_buffer []byte // The raw buffer. + raw_buffer_pos int // The current position of the buffer. 
+ + encoding yaml_encoding_t // The input encoding. + + offset int // The offset of the current position (in bytes). + mark yaml_mark_t // The mark of the current position. + + // Comments + + head_comment []byte // The current head comments + line_comment []byte // The current line comments + foot_comment []byte // The current foot comments + tail_comment []byte // Foot comment that happens at the end of a block. + stem_comment []byte // Comment in item preceding a nested structure (list inside list item, etc) + + comments []yaml_comment_t // The folded comments for all parsed tokens + comments_head int + + // Scanner stuff + + stream_start_produced bool // Have we started to scan the input stream? + stream_end_produced bool // Have we reached the end of the input stream? + + flow_level int // The number of unclosed '[' and '{' indicators. + + tokens []yaml_token_t // The tokens queue. + tokens_head int // The head of the tokens queue. + tokens_parsed int // The number of tokens fetched from the queue. + token_available bool // Does the tokens queue contain a token ready for dequeueing. + + indent int // The current indentation level. + indents []int // The indentation levels stack. + + simple_key_allowed bool // May a simple key occur at the current position? + simple_keys []yaml_simple_key_t // The stack of simple keys. + simple_keys_by_tok map[int]int // possible simple_key indexes indexed by token_number + + // Parser stuff + + state yaml_parser_state_t // The current parser state. + states []yaml_parser_state_t // The parser states stack. + marks []yaml_mark_t // The stack of marks. + tag_directives []yaml_tag_directive_t // The list of TAG directives. + + // Dumper stuff + + aliases []yaml_alias_data_t // The alias data. + + document *yaml_document_t // The currently parsed document. +} + +type yaml_comment_t struct { + scan_mark yaml_mark_t // Position where scanning for comments started + token_mark yaml_mark_t // Position after which tokens will be associated with this comment + start_mark yaml_mark_t // Position of '#' comment mark + end_mark yaml_mark_t // Position where comment terminated + + head []byte + line []byte + foot []byte +} + +// Emitter Definitions + +// The prototype of a write handler. +// +// The write handler is called when the emitter needs to flush the accumulated +// characters to the output. The handler should write @a size bytes of the +// @a buffer to the output. +// +// @param[in,out] data A pointer to an application data specified by +// +// yaml_emitter_set_output(). +// +// @param[in] buffer The buffer with bytes to be written. +// @param[in] size The size of the buffer. +// +// @returns On success, the handler should return @c 1. If the handler failed, +// the returned value should be @c 0. +type yaml_write_handler_t func(emitter *yaml_emitter_t, buffer []byte) error + +type yaml_emitter_state_t int + +// The emitter states. +const ( + // Expect STREAM-START. + yaml_EMIT_STREAM_START_STATE yaml_emitter_state_t = iota + + yaml_EMIT_FIRST_DOCUMENT_START_STATE // Expect the first DOCUMENT-START or STREAM-END. + yaml_EMIT_DOCUMENT_START_STATE // Expect DOCUMENT-START or STREAM-END. + yaml_EMIT_DOCUMENT_CONTENT_STATE // Expect the content of a document. + yaml_EMIT_DOCUMENT_END_STATE // Expect DOCUMENT-END. + yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a flow sequence. 
+ yaml_EMIT_FLOW_SEQUENCE_TRAIL_ITEM_STATE // Expect the next item of a flow sequence, with the comma already written out
+ yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE // Expect an item of a flow sequence.
+ yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping.
+ yaml_EMIT_FLOW_MAPPING_TRAIL_KEY_STATE // Expect the next key of a flow mapping, with the comma already written out
+ yaml_EMIT_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping.
+ yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a flow mapping.
+ yaml_EMIT_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping.
+ yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a block sequence.
+ yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE // Expect an item of a block sequence.
+ yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping.
+ yaml_EMIT_BLOCK_MAPPING_KEY_STATE // Expect the key of a block mapping.
+ yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a block mapping.
+ yaml_EMIT_BLOCK_MAPPING_VALUE_STATE // Expect a value of a block mapping.
+ yaml_EMIT_END_STATE // Expect nothing.
+)
+
+// The emitter structure.
+//
+// All members are internal. Manage the structure using the @c yaml_emitter_
+// family of functions.
+type yaml_emitter_t struct {
+
+ // Error handling
+
+ error yaml_error_type_t // Error type.
+ problem string // Error description.
+
+ // Writer stuff
+
+ write_handler yaml_write_handler_t // Write handler.
+
+ output_buffer *[]byte // String output data.
+ output_writer io.Writer // File output data.
+
+ buffer []byte // The working buffer.
+ buffer_pos int // The current position of the buffer.
+
+ raw_buffer []byte // The raw buffer.
+ raw_buffer_pos int // The current position of the buffer.
+
+ encoding yaml_encoding_t // The stream encoding.
+
+ // Emitter stuff
+
+ canonical bool // If the output is in the canonical style?
+ best_indent int // The number of indentation spaces.
+ best_width int // The preferred width of the output lines.
+ unicode bool // Allow unescaped non-ASCII characters?
+ line_break yaml_break_t // The preferred line break.
+
+ state yaml_emitter_state_t // The current emitter state.
+ states []yaml_emitter_state_t // The stack of states.
+
+ events []yaml_event_t // The event queue.
+ events_head int // The head of the event queue.
+
+ indents []int // The stack of indentation levels.
+
+ tag_directives []yaml_tag_directive_t // The list of tag directives.
+
+ indent int // The current indentation level.
+
+ compact_sequence_indent bool // Is '- ' considered part of the indentation for sequence elements?
+
+ flow_level int // The current flow level.
+
+ root_context bool // Is it the document root context?
+ sequence_context bool // Is it a sequence context?
+ mapping_context bool // Is it a mapping context?
+ simple_key_context bool // Is it a simple mapping key context?
+
+ line int // The current line.
+ column int // The current column.
+ whitespace bool // If the last character was a whitespace?
+ indention bool // If the last character was an indentation character (' ', '-', '?', ':')?
+ open_ended bool // If an explicit document end is required?
+
+ space_above bool // Is there an empty line above?
+ foot_indent int // The indent used to write the foot comment above, or -1 if none.
+
+ // Anchor analysis.
+ anchor_data struct {
+ anchor []byte // The anchor value.
+ alias bool // Is it an alias?
+ }
+
+ // Tag analysis.
+ tag_data struct {
+ handle []byte // The tag handle.
+ suffix []byte // The tag suffix.
+ }
+
+ // Scalar analysis.
+ scalar_data struct {
+ value []byte // The scalar value.
+ multiline bool // Does the scalar contain line breaks?
+ flow_plain_allowed bool // Can the scalar be expressed in the flow plain style?
+ block_plain_allowed bool // Can the scalar be expressed in the block plain style?
+ single_quoted_allowed bool // Can the scalar be expressed in the single quoted style?
+ block_allowed bool // Can the scalar be expressed in the literal or folded styles?
+ style yaml_scalar_style_t // The output style.
+ }
+
+ // Comments
+ head_comment []byte
+ line_comment []byte
+ foot_comment []byte
+ tail_comment []byte
+
+ key_line_comment []byte
+
+ // Dumper stuff
+
+ opened bool // If the stream was already opened?
+ closed bool // If the stream was already closed?
+
+ // The information associated with the document nodes.
+ anchors *struct {
+ references int // The number of references.
+ anchor int // The anchor id.
+ serialized bool // If the node has been emitted?
+ }
+
+ last_anchor_id int // The last assigned anchor id.
+
+ document *yaml_document_t // The currently emitted document.
+}
diff --git a/vendor/go.yaml.in/yaml/v3/yamlprivateh.go b/vendor/go.yaml.in/yaml/v3/yamlprivateh.go
new file mode 100644
index 0000000000..dea1ba9610
--- /dev/null
+++ b/vendor/go.yaml.in/yaml/v3/yamlprivateh.go
@@ -0,0 +1,198 @@
+//
+// Copyright (c) 2011-2019 Canonical Ltd
+// Copyright (c) 2006-2010 Kirill Simonov
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal in
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+// of the Software, and to permit persons to whom the Software is furnished to do
+// so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+
+package yaml
+
+const (
+ // The size of the input raw buffer.
+ input_raw_buffer_size = 512
+
+ // The size of the input buffer.
+ // It should be possible to decode the whole raw buffer.
+ input_buffer_size = input_raw_buffer_size * 3
+
+ // The size of the output buffer.
+ output_buffer_size = 128
+
+ // The size of the output raw buffer.
+ // It should be possible to encode the whole output buffer.
+ output_raw_buffer_size = (output_buffer_size*2 + 2)
+
+ // The size of other stacks and queues.
+ initial_stack_size = 16
+ initial_queue_size = 16
+ initial_string_size = 16
+)
+
+// Check if the character at the specified position is an alphabetical
+// character, a digit, '_', or '-'.
+func is_alpha(b []byte, i int) bool { + return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'Z' || b[i] >= 'a' && b[i] <= 'z' || b[i] == '_' || b[i] == '-' +} + +// Check if the character at the specified position is a digit. +func is_digit(b []byte, i int) bool { + return b[i] >= '0' && b[i] <= '9' +} + +// Get the value of a digit. +func as_digit(b []byte, i int) int { + return int(b[i]) - '0' +} + +// Check if the character at the specified position is a hex-digit. +func is_hex(b []byte, i int) bool { + return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'F' || b[i] >= 'a' && b[i] <= 'f' +} + +// Get the value of a hex-digit. +func as_hex(b []byte, i int) int { + bi := b[i] + if bi >= 'A' && bi <= 'F' { + return int(bi) - 'A' + 10 + } + if bi >= 'a' && bi <= 'f' { + return int(bi) - 'a' + 10 + } + return int(bi) - '0' +} + +// Check if the character is ASCII. +func is_ascii(b []byte, i int) bool { + return b[i] <= 0x7F +} + +// Check if the character at the start of the buffer can be printed unescaped. +func is_printable(b []byte, i int) bool { + return ((b[i] == 0x0A) || // . == #x0A + (b[i] >= 0x20 && b[i] <= 0x7E) || // #x20 <= . <= #x7E + (b[i] == 0xC2 && b[i+1] >= 0xA0) || // #0xA0 <= . <= #xD7FF + (b[i] > 0xC2 && b[i] < 0xED) || + (b[i] == 0xED && b[i+1] < 0xA0) || + (b[i] == 0xEE) || + (b[i] == 0xEF && // #xE000 <= . <= #xFFFD + !(b[i+1] == 0xBB && b[i+2] == 0xBF) && // && . != #xFEFF + !(b[i+1] == 0xBF && (b[i+2] == 0xBE || b[i+2] == 0xBF)))) +} + +// Check if the character at the specified position is NUL. +func is_z(b []byte, i int) bool { + return b[i] == 0x00 +} + +// Check if the beginning of the buffer is a BOM. +func is_bom(b []byte, i int) bool { + return b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF +} + +// Check if the character at the specified position is space. +func is_space(b []byte, i int) bool { + return b[i] == ' ' +} + +// Check if the character at the specified position is tab. +func is_tab(b []byte, i int) bool { + return b[i] == '\t' +} + +// Check if the character at the specified position is blank (space or tab). +func is_blank(b []byte, i int) bool { + //return is_space(b, i) || is_tab(b, i) + return b[i] == ' ' || b[i] == '\t' +} + +// Check if the character at the specified position is a line break. +func is_break(b []byte, i int) bool { + return (b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9) // PS (#x2029) +} + +func is_crlf(b []byte, i int) bool { + return b[i] == '\r' && b[i+1] == '\n' +} + +// Check if the character is a line break or NUL. +func is_breakz(b []byte, i int) bool { + //return is_break(b, i) || is_z(b, i) + return ( + // is_break: + b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) + // is_z: + b[i] == 0) +} + +// Check if the character is a line break, space, or NUL. 
+func is_spacez(b []byte, i int) bool { + //return is_space(b, i) || is_breakz(b, i) + return ( + // is_space: + b[i] == ' ' || + // is_breakz: + b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) + b[i] == 0) +} + +// Check if the character is a line break, space, tab, or NUL. +func is_blankz(b []byte, i int) bool { + //return is_blank(b, i) || is_breakz(b, i) + return ( + // is_blank: + b[i] == ' ' || b[i] == '\t' || + // is_breakz: + b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) + b[i] == 0) +} + +// Determine the width of the character. +func width(b byte) int { + // Don't replace these by a switch without first + // confirming that it is being inlined. + if b&0x80 == 0x00 { + return 1 + } + if b&0xE0 == 0xC0 { + return 2 + } + if b&0xF0 == 0xE0 { + return 3 + } + if b&0xF8 == 0xF0 { + return 4 + } + return 0 + +} diff --git a/vendor/golang.org/x/mod/modfile/read.go b/vendor/golang.org/x/mod/modfile/read.go index 2d7486804f..f58de029ea 100644 --- a/vendor/golang.org/x/mod/modfile/read.go +++ b/vendor/golang.org/x/mod/modfile/read.go @@ -94,7 +94,7 @@ func (x *FileSyntax) Span() (start, end Position) { // line, the new line is added at the end of the block containing hint, // extracting hint into a new block if it is not yet in one. // -// If the hint is non-nil buts its first token does not match, +// If the hint is non-nil but its first token does not match, // the new line is added after the block containing hint // (or hint itself, if not in a block). // diff --git a/vendor/golang.org/x/mod/module/module.go b/vendor/golang.org/x/mod/module/module.go index 16e1aa7ab4..9d3955bd73 100644 --- a/vendor/golang.org/x/mod/module/module.go +++ b/vendor/golang.org/x/mod/module/module.go @@ -261,7 +261,7 @@ func modPathOK(r rune) bool { // importPathOK reports whether r can appear in a package import path element. // -// Import paths are intermediate between module paths and file paths: we allow +// Import paths are intermediate between module paths and file paths: we // disallow characters that would be confusing or ambiguous as arguments to // 'go get' (such as '@' and ' ' ), but allow certain characters that are // otherwise-unambiguous on the command line and historically used for some diff --git a/vendor/golang.org/x/mod/semver/semver.go b/vendor/golang.org/x/mod/semver/semver.go index 628f8fd687..824b282c83 100644 --- a/vendor/golang.org/x/mod/semver/semver.go +++ b/vendor/golang.org/x/mod/semver/semver.go @@ -45,8 +45,8 @@ func IsValid(v string) bool { // Canonical returns the canonical formatting of the semantic version v. // It fills in any missing .MINOR or .PATCH and discards build metadata. -// Two semantic versions compare equal only if their canonical formattings -// are identical strings. +// Two semantic versions compare equal only if their canonical formatting +// is an identical string. // The canonical invalid semantic version is the empty string. 
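The Canonical doc comment above reads more concretely next to its behavior; a short usage sketch of the public golang.org/x/mod/semver API, with results as documented:

```go
package main

import (
	"fmt"

	"golang.org/x/mod/semver"
)

func main() {
	fmt.Println(semver.Canonical("v1.2"))         // "v1.2.0": missing .PATCH filled in
	fmt.Println(semver.Canonical("v1.2.3+meta"))  // "v1.2.3": build metadata discarded
	fmt.Println(semver.Canonical("1.2.3"))        // "": invalid, versions need a leading "v"
	fmt.Println(semver.Compare("v1.2", "v1.2.0")) // 0: equal, via identical canonical forms
}
```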
func Canonical(v string) string { p, ok := parse(v) diff --git a/vendor/golang.org/x/tools/go/analysis/diagnostic.go b/vendor/golang.org/x/tools/go/analysis/diagnostic.go index f6118bec64..527540c62c 100644 --- a/vendor/golang.org/x/tools/go/analysis/diagnostic.go +++ b/vendor/golang.org/x/tools/go/analysis/diagnostic.go @@ -33,8 +33,9 @@ type Diagnostic struct { URL string // SuggestedFixes is an optional list of fixes to address the - // problem described by the diagnostic. Each one represents - // an alternative strategy; at most one may be applied. + // problem described by the diagnostic. Each one represents an + // alternative strategy, and should have a distinct and + // descriptive message; at most one may be applied. // // Fixes for different diagnostics should be treated as // independent changes to the same baseline file state, diff --git a/vendor/golang.org/x/tools/go/analysis/passes/appends/appends.go b/vendor/golang.org/x/tools/go/analysis/passes/appends/appends.go index e554c3cc90..8ccf982d23 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/appends/appends.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/appends/appends.go @@ -13,9 +13,9 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" - "golang.org/x/tools/go/analysis/passes/internal/analysisutil" "golang.org/x/tools/go/ast/inspector" "golang.org/x/tools/go/types/typeutil" + "golang.org/x/tools/internal/analysis/analyzerutil" ) //go:embed doc.go @@ -23,7 +23,7 @@ var doc string var Analyzer = &analysis.Analyzer{ Name: "appends", - Doc: analysisutil.MustExtractDoc(doc, "appends"), + Doc: analyzerutil.MustExtractDoc(doc, "appends"), URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/appends", Requires: []*analysis.Analyzer{inspect.Analyzer}, Run: run, diff --git a/vendor/golang.org/x/tools/go/analysis/passes/asmdecl/asmdecl.go b/vendor/golang.org/x/tools/go/analysis/passes/asmdecl/asmdecl.go index efbf05d596..ba9ca38a81 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/asmdecl/asmdecl.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/asmdecl/asmdecl.go @@ -19,7 +19,7 @@ import ( "strings" "golang.org/x/tools/go/analysis" - "golang.org/x/tools/go/analysis/passes/internal/analysisutil" + "golang.org/x/tools/internal/analysis/analyzerutil" ) const Doc = "report mismatches between assembly files and Go declarations" @@ -175,7 +175,7 @@ func run(pass *analysis.Pass) (any, error) { Files: for _, fname := range sfiles { - content, tf, err := analysisutil.ReadFile(pass, fname) + content, tf, err := analyzerutil.ReadFile(pass, fname) if err != nil { return nil, err } @@ -211,7 +211,7 @@ Files: resultStr = "result register" } for _, line := range retLine { - pass.Reportf(analysisutil.LineStart(tf, line), "[%s] %s: RET without writing to %s", arch, fnName, resultStr) + pass.Reportf(tf.LineStart(line), "[%s] %s: RET without writing to %s", arch, fnName, resultStr) } } retLine = nil @@ -227,7 +227,7 @@ Files: lineno++ badf := func(format string, args ...any) { - pass.Reportf(analysisutil.LineStart(tf, lineno), "[%s] %s: %s", arch, fnName, fmt.Sprintf(format, args...)) + pass.Reportf(tf.LineStart(lineno), "[%s] %s: %s", arch, fnName, fmt.Sprintf(format, args...)) } if arch == "" { diff --git a/vendor/golang.org/x/tools/go/analysis/passes/assign/assign.go b/vendor/golang.org/x/tools/go/analysis/passes/assign/assign.go index dfe68d9b15..69734df825 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/assign/assign.go +++ 
b/vendor/golang.org/x/tools/go/analysis/passes/assign/assign.go @@ -17,9 +17,11 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" - "golang.org/x/tools/go/analysis/passes/internal/analysisutil" "golang.org/x/tools/go/ast/inspector" + "golang.org/x/tools/internal/analysis/analyzerutil" "golang.org/x/tools/internal/astutil" + "golang.org/x/tools/internal/refactor" + "golang.org/x/tools/internal/typesinternal" ) //go:embed doc.go @@ -27,26 +29,26 @@ var doc string var Analyzer = &analysis.Analyzer{ Name: "assign", - Doc: analysisutil.MustExtractDoc(doc, "assign"), + Doc: analyzerutil.MustExtractDoc(doc, "assign"), URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/assign", Requires: []*analysis.Analyzer{inspect.Analyzer}, Run: run, } func run(pass *analysis.Pass) (any, error) { - inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + var ( + inspect = pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + info = pass.TypesInfo + ) - nodeFilter := []ast.Node{ - (*ast.AssignStmt)(nil), - } - inspect.Preorder(nodeFilter, func(n ast.Node) { - stmt := n.(*ast.AssignStmt) + for curAssign := range inspect.Root().Preorder((*ast.AssignStmt)(nil)) { + stmt := curAssign.Node().(*ast.AssignStmt) if stmt.Tok != token.ASSIGN { - return // ignore := + continue // ignore := } if len(stmt.Lhs) != len(stmt.Rhs) { // If LHS and RHS have different cardinality, they can't be the same. - return + continue } // Delete redundant LHS, RHS pairs, taking care @@ -61,9 +63,9 @@ func run(pass *analysis.Pass) (any, error) { isSelfAssign := false var le string - if !analysisutil.HasSideEffects(pass.TypesInfo, lhs) && - !analysisutil.HasSideEffects(pass.TypesInfo, rhs) && - !isMapIndex(pass.TypesInfo, lhs) && + if typesinternal.NoEffects(info, lhs) && + typesinternal.NoEffects(info, rhs) && + !isMapIndex(info, lhs) && reflect.TypeOf(lhs) == reflect.TypeOf(rhs) { // short-circuit the heavy-weight gofmt check le = astutil.Format(pass.Fset, lhs) @@ -109,13 +111,14 @@ func run(pass *analysis.Pass) (any, error) { } if len(exprs) == 0 { - return + continue } if len(exprs) == len(stmt.Lhs) { // If every part of the statement is a self-assignment, // remove the whole statement. 
- edits = []analysis.TextEdit{{Pos: stmt.Pos(), End: stmt.End()}} + tokFile := pass.Fset.File(stmt.Pos()) + edits = refactor.DeleteStmt(tokFile, curAssign) } pass.Report(analysis.Diagnostic{ @@ -126,7 +129,7 @@ func run(pass *analysis.Pass) (any, error) { TextEdits: edits, }}, }) - }) + } return nil, nil } diff --git a/vendor/golang.org/x/tools/go/analysis/passes/atomic/atomic.go b/vendor/golang.org/x/tools/go/analysis/passes/atomic/atomic.go index ddd875b23b..c6ab7ff7a2 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/atomic/atomic.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/atomic/atomic.go @@ -11,9 +11,9 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" - "golang.org/x/tools/go/analysis/passes/internal/analysisutil" "golang.org/x/tools/go/ast/inspector" "golang.org/x/tools/go/types/typeutil" + "golang.org/x/tools/internal/analysis/analyzerutil" "golang.org/x/tools/internal/astutil" "golang.org/x/tools/internal/typesinternal" ) @@ -23,7 +23,7 @@ var doc string var Analyzer = &analysis.Analyzer{ Name: "atomic", - Doc: analysisutil.MustExtractDoc(doc, "atomic"), + Doc: analyzerutil.MustExtractDoc(doc, "atomic"), URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/atomic", Requires: []*analysis.Analyzer{inspect.Analyzer}, RunDespiteErrors: true, diff --git a/vendor/golang.org/x/tools/go/analysis/passes/bools/bools.go b/vendor/golang.org/x/tools/go/analysis/passes/bools/bools.go index 3c2a82dce3..574fafaa95 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/bools/bools.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/bools/bools.go @@ -13,9 +13,9 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" - "golang.org/x/tools/go/analysis/passes/internal/analysisutil" "golang.org/x/tools/go/ast/inspector" "golang.org/x/tools/internal/astutil" + "golang.org/x/tools/internal/typesinternal" ) const Doc = "check for common mistakes involving boolean operators" @@ -84,7 +84,7 @@ func (op boolOp) commutativeSets(info *types.Info, e *ast.BinaryExpr, seen map[* i := 0 var sets [][]ast.Expr for j := 0; j <= len(exprs); j++ { - if j == len(exprs) || analysisutil.HasSideEffects(info, exprs[j]) { + if j == len(exprs) || !typesinternal.NoEffects(info, exprs[j]) { if i < j { sets = append(sets, exprs[i:j]) } diff --git a/vendor/golang.org/x/tools/go/analysis/passes/buildssa/buildssa.go b/vendor/golang.org/x/tools/go/analysis/passes/buildssa/buildssa.go index f49fea5176..37c878ef31 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/buildssa/buildssa.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/buildssa/buildssa.go @@ -11,10 +11,14 @@ package buildssa import ( "go/ast" "go/types" + "iter" "reflect" "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/ctrlflow" + "golang.org/x/tools/go/analysis/passes/internal/ctrlflowinternal" "golang.org/x/tools/go/ssa" + "golang.org/x/tools/internal/ssainternal" ) var Analyzer = &analysis.Analyzer{ @@ -22,7 +26,13 @@ var Analyzer = &analysis.Analyzer{ Doc: "build SSA-form IR for later passes", URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/buildssa", Run: run, - ResultType: reflect.TypeOf(new(SSA)), + Requires: []*analysis.Analyzer{ctrlflow.Analyzer}, + ResultType: reflect.TypeFor[*SSA](), + // Do not add FactTypes here: SSA construction of P must not + // require SSA construction of all of P's dependencies. 
+ // (That's why we enlist the cheaper ctrlflow pass to compute + // noreturn instead of having go/ssa + buildssa do it.) + FactTypes: nil, } // SSA provides SSA-form intermediate representation for all the @@ -33,6 +43,8 @@ type SSA struct { } func run(pass *analysis.Pass) (any, error) { + cfgs := pass.ResultOf[ctrlflow.Analyzer].(*ctrlflow.CFGs) + // We must create a new Program for each Package because the // analysis API provides no place to hang a Program shared by // all Packages. Consequently, SSA Packages and Functions do not @@ -49,6 +61,11 @@ func run(pass *analysis.Pass) (any, error) { prog := ssa.NewProgram(pass.Fset, mode) + // Use the result of the ctrlflow analysis to improve the SSA CFG. + ssainternal.SetNoReturn(prog, func(fn *types.Func) bool { + return ctrlflowinternal.NoReturn(cfgs, fn) + }) + // Create SSA packages for direct imports. for _, p := range pass.Pkg.Imports() { prog.CreatePackage(p, nil, nil, true) @@ -61,34 +78,41 @@ func run(pass *analysis.Pass) (any, error) { // Compute list of source functions, including literals, // in source order. var funcs []*ssa.Function - for _, f := range pass.Files { - for _, decl := range f.Decls { - if fdecl, ok := decl.(*ast.FuncDecl); ok { - // (init functions have distinct Func - // objects named "init" and distinct - // ssa.Functions named "init#1", ...) - - fn := pass.TypesInfo.Defs[fdecl.Name].(*types.Func) - if fn == nil { - panic(fn) - } + for _, fn := range allFunctions(pass) { + // (init functions have distinct Func + // objects named "init" and distinct + // ssa.Functions named "init#1", ...) - f := ssapkg.Prog.FuncValue(fn) - if f == nil { - panic(fn) - } + f := ssapkg.Prog.FuncValue(fn) + if f == nil { + panic(fn) + } - var addAnons func(f *ssa.Function) - addAnons = func(f *ssa.Function) { - funcs = append(funcs, f) - for _, anon := range f.AnonFuncs { - addAnons(anon) - } - } - addAnons(f) + var addAnons func(f *ssa.Function) + addAnons = func(f *ssa.Function) { + funcs = append(funcs, f) + for _, anon := range f.AnonFuncs { + addAnons(anon) } } + addAnons(f) } return &SSA{Pkg: ssapkg, SrcFuncs: funcs}, nil } + +// allFunctions returns an iterator over all named functions. +func allFunctions(pass *analysis.Pass) iter.Seq2[*ast.FuncDecl, *types.Func] { + return func(yield func(*ast.FuncDecl, *types.Func) bool) { + for _, file := range pass.Files { + for _, decl := range file.Decls { + if decl, ok := decl.(*ast.FuncDecl); ok { + fn := pass.TypesInfo.Defs[decl.Name].(*types.Func) + if !yield(decl, fn) { + return + } + } + } + } + } +} diff --git a/vendor/golang.org/x/tools/go/analysis/passes/buildtag/buildtag.go b/vendor/golang.org/x/tools/go/analysis/passes/buildtag/buildtag.go index 91aac67625..d0b28e5b84 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/buildtag/buildtag.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/buildtag/buildtag.go @@ -14,9 +14,7 @@ import ( "unicode" "golang.org/x/tools/go/analysis" - "golang.org/x/tools/go/analysis/passes/internal/analysisutil" - "golang.org/x/tools/internal/analysisinternal" - "golang.org/x/tools/internal/versions" + "golang.org/x/tools/internal/analysis/analyzerutil" ) const Doc = "check //go:build and // +build directives" @@ -57,6 +55,7 @@ func runBuildTag(pass *analysis.Pass) (any, error) { func checkGoFile(pass *analysis.Pass, f *ast.File) { var check checker check.init(pass) + defer check.finish() for _, group := range f.Comments { // A +build comment is ignored after or adjoining the package declaration. 
@@ -78,27 +77,6 @@ func checkGoFile(pass *analysis.Pass, f *ast.File) { check.comment(c.Slash, c.Text) } } - - check.finish() - - // For Go 1.18+ files, offer a fix to remove the +build lines - // if they passed all consistency checks. - if check.crossCheck && !versions.Before(pass.TypesInfo.FileVersions[f], "go1.18") { - for _, rng := range check.plusBuildRanges { - check.pass.Report(analysis.Diagnostic{ - Pos: rng.Pos(), - End: rng.End(), - Message: "+build line is no longer needed", - SuggestedFixes: []analysis.SuggestedFix{{ - Message: "Remove obsolete +build line", - TextEdits: []analysis.TextEdit{{ - Pos: rng.Pos(), - End: rng.End(), - }}, - }}, - }) - } - } } func checkOtherFile(pass *analysis.Pass, filename string) error { @@ -108,7 +86,7 @@ func checkOtherFile(pass *analysis.Pass, filename string) error { // We cannot use the Go parser, since this may not be a Go source file. // Read the raw bytes instead. - content, tf, err := analysisutil.ReadFile(pass, filename) + content, tf, err := analyzerutil.ReadFile(pass, filename) if err != nil { return err } @@ -118,15 +96,15 @@ func checkOtherFile(pass *analysis.Pass, filename string) error { } type checker struct { - pass *analysis.Pass - plusBuildOK bool // "+build" lines still OK - goBuildOK bool // "go:build" lines still OK - crossCheck bool // cross-check go:build and +build lines when done reading file - inStar bool // currently in a /* */ comment - goBuildPos token.Pos // position of first go:build line found - plusBuildRanges []analysis.Range // range of each "+build" line found - goBuild constraint.Expr // go:build constraint found - plusBuild constraint.Expr // AND of +build constraints found + pass *analysis.Pass + plusBuildOK bool // "+build" lines still OK + goBuildOK bool // "go:build" lines still OK + crossCheck bool // cross-check go:build and +build lines when done reading file + inStar bool // currently in a /* */ comment + goBuildPos token.Pos // position of first go:build line found + plusBuildPos token.Pos // position of first "+build" line found + goBuild constraint.Expr // go:build constraint found + plusBuild constraint.Expr // AND of +build constraints found } func (check *checker) init(pass *analysis.Pass) { @@ -294,8 +272,6 @@ func (check *checker) goBuildLine(pos token.Pos, line string) { } func (check *checker) plusBuildLine(pos token.Pos, line string) { - plusBuildRange := analysisinternal.Range(pos, pos+token.Pos(len(line))) - line = strings.TrimSpace(line) if !constraint.IsPlusBuild(line) { // Comment with +build but not at beginning. @@ -310,7 +286,9 @@ func (check *checker) plusBuildLine(pos token.Pos, line string) { check.crossCheck = false } - check.plusBuildRanges = append(check.plusBuildRanges, plusBuildRange) + if check.plusBuildPos == token.NoPos { + check.plusBuildPos = pos + } // testing hack: stop at // ERROR if i := strings.Index(line, " // ERROR "); i >= 0 { @@ -358,19 +336,19 @@ func (check *checker) plusBuildLine(pos token.Pos, line string) { } func (check *checker) finish() { - if !check.crossCheck || len(check.plusBuildRanges) == 0 || check.goBuildPos == token.NoPos { + if !check.crossCheck || check.plusBuildPos == token.NoPos || check.goBuildPos == token.NoPos { return } // Have both //go:build and // +build, // with no errors found (crossCheck still true). // Check they match. 
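The comparison performed in finish below hinges on go/build/constraint's ability to derive the "// +build" lines equivalent to a "//go:build" expression. A small sketch using only that standard-library package (the exact output shape is left unasserted, since the package may split one expression across several lines):

```go
package main

import (
	"fmt"
	"go/build/constraint"
)

func main() {
	expr, err := constraint.Parse("//go:build (linux || darwin) && amd64")
	if err != nil {
		panic(err)
	}
	lines, err := constraint.PlusBuildLines(expr)
	if err != nil {
		panic(err)
	}
	for _, line := range lines {
		fmt.Println(line) // the equivalent "// +build" line(s): comma binds as AND, space as OR
	}
}
```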
+ var want constraint.Expr lines, err := constraint.PlusBuildLines(check.goBuild) if err != nil { check.pass.Reportf(check.goBuildPos, "%v", err) return } - var want constraint.Expr for _, line := range lines { y, err := constraint.Parse(line) if err != nil { @@ -385,8 +363,7 @@ func (check *checker) finish() { } } if want.String() != check.plusBuild.String() { - check.pass.ReportRangef(check.plusBuildRanges[0], "+build lines do not match //go:build condition") - check.crossCheck = false // don't offer fix to remove +build + check.pass.Reportf(check.plusBuildPos, "+build lines do not match //go:build condition") return } } diff --git a/vendor/golang.org/x/tools/go/analysis/passes/cgocall/cgocall.go b/vendor/golang.org/x/tools/go/analysis/passes/cgocall/cgocall.go index bf1202b92b..54b8062cc0 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/cgocall/cgocall.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/cgocall/cgocall.go @@ -350,8 +350,8 @@ func typeOKForCgoCall(t types.Type, m map[types.Type]bool) bool { case *types.Array: return typeOKForCgoCall(t.Elem(), m) case *types.Struct: - for i := 0; i < t.NumFields(); i++ { - if !typeOKForCgoCall(t.Field(i).Type(), m) { + for field := range t.Fields() { + if !typeOKForCgoCall(field.Type(), m) { return false } } diff --git a/vendor/golang.org/x/tools/go/analysis/passes/copylock/copylock.go b/vendor/golang.org/x/tools/go/analysis/passes/copylock/copylock.go index 4190cc5900..208602f486 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/copylock/copylock.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/copylock/copylock.go @@ -328,8 +328,8 @@ func lockPath(tpkg *types.Package, typ types.Type, seen map[types.Type]bool) typ ttyp, ok := typ.Underlying().(*types.Tuple) if ok { - for i := 0; i < ttyp.Len(); i++ { - subpath := lockPath(tpkg, ttyp.At(i).Type(), seen) + for v := range ttyp.Variables() { + subpath := lockPath(tpkg, v.Type(), seen) if subpath != nil { return append(subpath, typ.String()) } diff --git a/vendor/golang.org/x/tools/go/analysis/passes/ctrlflow/ctrlflow.go b/vendor/golang.org/x/tools/go/analysis/passes/ctrlflow/ctrlflow.go index 951aaed00f..d6c2586e73 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/ctrlflow/ctrlflow.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/ctrlflow/ctrlflow.go @@ -16,9 +16,12 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/analysis/passes/internal/ctrlflowinternal" "golang.org/x/tools/go/ast/inspector" "golang.org/x/tools/go/cfg" "golang.org/x/tools/go/types/typeutil" + "golang.org/x/tools/internal/cfginternal" + "golang.org/x/tools/internal/typesinternal" ) var Analyzer = &analysis.Analyzer{ @@ -26,7 +29,7 @@ var Analyzer = &analysis.Analyzer{ Doc: "build a control-flow graph", URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/ctrlflow", Run: run, - ResultType: reflect.TypeOf(new(CFGs)), + ResultType: reflect.TypeFor[*CFGs](), FactTypes: []analysis.Fact{new(noReturn)}, Requires: []*analysis.Analyzer{inspect.Analyzer}, } @@ -44,7 +47,20 @@ type CFGs struct { defs map[*ast.Ident]types.Object // from Pass.TypesInfo.Defs funcDecls map[*types.Func]*declInfo funcLits map[*ast.FuncLit]*litInfo - pass *analysis.Pass // transient; nil after construction + noReturn map[*types.Func]bool // functions lacking a reachable return statement + pass *analysis.Pass // transient; nil after construction +} + +// TODO(adonovan): add (*CFGs).NoReturn to public API. 
+func (c *CFGs) isNoReturn(fn *types.Func) bool { + return c.noReturn[fn] +} + +func init() { + // Expose the hidden method to callers in x/tools. + ctrlflowinternal.NoReturn = func(c any, fn *types.Func) bool { + return c.(*CFGs).isNoReturn(fn) + } } // CFGs has two maps: funcDecls for named functions and funcLits for @@ -54,15 +70,14 @@ type CFGs struct { // *types.Func but not the other way. type declInfo struct { - decl *ast.FuncDecl - cfg *cfg.CFG // iff decl.Body != nil - started bool // to break cycles - noReturn bool + decl *ast.FuncDecl + cfg *cfg.CFG // iff decl.Body != nil + started bool // to break cycles } type litInfo struct { cfg *cfg.CFG - noReturn bool + noReturn bool // (currently unused) } // FuncDecl returns the control-flow graph for a named function. @@ -118,6 +133,7 @@ func run(pass *analysis.Pass) (any, error) { defs: pass.TypesInfo.Defs, funcDecls: funcDecls, funcLits: funcLits, + noReturn: make(map[*types.Func]bool), pass: pass, } @@ -138,7 +154,7 @@ func run(pass *analysis.Pass) (any, error) { li := funcLits[lit] if li.cfg == nil { li.cfg = cfg.New(lit.Body, c.callMayReturn) - if !hasReachableReturn(li.cfg) { + if cfginternal.IsNoReturn(li.cfg) { li.noReturn = true } } @@ -158,27 +174,28 @@ func (c *CFGs) buildDecl(fn *types.Func, di *declInfo) { // The buildDecl call tree thus resembles the static call graph. // We mark each node when we start working on it to break cycles. - if !di.started { // break cycle - di.started = true + if di.started { + return // break cycle + } + di.started = true - if isIntrinsicNoReturn(fn) { - di.noReturn = true - } - if di.decl.Body != nil { - di.cfg = cfg.New(di.decl.Body, c.callMayReturn) - if !hasReachableReturn(di.cfg) { - di.noReturn = true - } - } - if di.noReturn { - c.pass.ExportObjectFact(fn, new(noReturn)) - } + noreturn := isIntrinsicNoReturn(fn) - // debugging - if false { - log.Printf("CFG for %s:\n%s (noreturn=%t)\n", fn, di.cfg.Format(c.pass.Fset), di.noReturn) + if di.decl.Body != nil { + di.cfg = cfg.New(di.decl.Body, c.callMayReturn) + if cfginternal.IsNoReturn(di.cfg) { + noreturn = true } } + if noreturn { + c.pass.ExportObjectFact(fn, new(noReturn)) + c.noReturn[fn] = true + } + + // debugging + if false { + log.Printf("CFG for %s:\n%s (noreturn=%t)\n", fn, di.cfg.Format(c.pass.Fset), noreturn) + } } // callMayReturn reports whether the called function may return. @@ -201,31 +218,26 @@ func (c *CFGs) callMayReturn(call *ast.CallExpr) (r bool) { // Function or method declared in this package? if di, ok := c.funcDecls[fn]; ok { c.buildDecl(fn, di) - return !di.noReturn + return !c.noReturn[fn] } // Not declared in this package. // Is there a fact from another package? - return !c.pass.ImportObjectFact(fn, new(noReturn)) + if c.pass.ImportObjectFact(fn, new(noReturn)) { + c.noReturn[fn] = true + return false + } + + return true } var panicBuiltin = types.Universe.Lookup("panic").(*types.Builtin) -func hasReachableReturn(g *cfg.CFG) bool { - for _, b := range g.Blocks { - if b.Live && b.Return() != nil { - return true - } - } - return false -} - // isIntrinsicNoReturn reports whether a function intrinsically never // returns because it stops execution of the calling thread. // It is the base case in the recursion. func isIntrinsicNoReturn(fn *types.Func) bool { // Add functions here as the need arises, but don't allocate memory. 
- path, name := fn.Pkg().Path(), fn.Name()
- return path == "syscall" && (name == "Exit" || name == "ExitProcess" || name == "ExitThread") ||
- path == "runtime" && name == "Goexit"
+ return typesinternal.IsFunctionNamed(fn, "syscall", "Exit", "ExitProcess", "ExitThread") ||
+ typesinternal.IsFunctionNamed(fn, "runtime", "Goexit")
}
diff --git a/vendor/golang.org/x/tools/go/analysis/passes/deepequalerrors/deepequalerrors.go b/vendor/golang.org/x/tools/go/analysis/passes/deepequalerrors/deepequalerrors.go
index 5e3d1a3535..32087cd71a 100644
--- a/vendor/golang.org/x/tools/go/analysis/passes/deepequalerrors/deepequalerrors.go
+++ b/vendor/golang.org/x/tools/go/analysis/passes/deepequalerrors/deepequalerrors.go
@@ -96,8 +96,8 @@ func containsError(typ types.Type) bool {
 case *types.Map:
 return check(t.Key()) || check(t.Elem())
 case *types.Struct:
- for i := 0; i < t.NumFields(); i++ {
- if check(t.Field(i).Type()) {
+ for field := range t.Fields() {
+ if check(field.Type()) {
 return true
 }
 }
diff --git a/vendor/golang.org/x/tools/go/analysis/passes/defers/defers.go b/vendor/golang.org/x/tools/go/analysis/passes/defers/defers.go
index bf62d327d9..af93407cae 100644
--- a/vendor/golang.org/x/tools/go/analysis/passes/defers/defers.go
+++ b/vendor/golang.org/x/tools/go/analysis/passes/defers/defers.go
@@ -10,9 +10,9 @@ import (
 "golang.org/x/tools/go/analysis"
 "golang.org/x/tools/go/analysis/passes/inspect"
- "golang.org/x/tools/go/analysis/passes/internal/analysisutil"
 "golang.org/x/tools/go/ast/inspector"
 "golang.org/x/tools/go/types/typeutil"
+ "golang.org/x/tools/internal/analysis/analyzerutil"
 "golang.org/x/tools/internal/typesinternal"
)
@@ -23,8 +23,8 @@ var doc string
var Analyzer = &analysis.Analyzer{
 Name: "defers",
 Requires: []*analysis.Analyzer{inspect.Analyzer},
+ Doc: analyzerutil.MustExtractDoc(doc, "defers"),
 URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/defers",
- Doc: analysisutil.MustExtractDoc(doc, "defers"),
 Run: run,
}
diff --git a/vendor/golang.org/x/tools/go/analysis/passes/directive/directive.go b/vendor/golang.org/x/tools/go/analysis/passes/directive/directive.go
index bebec89140..5fa28861e5 100644
--- a/vendor/golang.org/x/tools/go/analysis/passes/directive/directive.go
+++ b/vendor/golang.org/x/tools/go/analysis/passes/directive/directive.go
@@ -14,7 +14,7 @@ import (
 "unicode/utf8"
 "golang.org/x/tools/go/analysis"
- "golang.org/x/tools/go/analysis/passes/internal/analysisutil"
+ "golang.org/x/tools/internal/analysis/analyzerutil"
)
const Doc = `check Go toolchain directives such as //go:debug
@@ -86,7 +86,7 @@ func checkGoFile(pass *analysis.Pass, f *ast.File) {
func checkOtherFile(pass *analysis.Pass, filename string) error {
 // We cannot use the Go parser, since this is not a Go source file.
 // Read the raw bytes instead.
- content, tf, err := analysisutil.ReadFile(pass, filename)
+ content, tf, err := analyzerutil.ReadFile(pass, filename)
 if err != nil {
 return err
 }
diff --git a/vendor/golang.org/x/tools/go/analysis/passes/errorsas/errorsas.go b/vendor/golang.org/x/tools/go/analysis/passes/errorsas/errorsas.go
index b3df99929d..f1465f7343 100644
--- a/vendor/golang.org/x/tools/go/analysis/passes/errorsas/errorsas.go
+++ b/vendor/golang.org/x/tools/go/analysis/passes/errorsas/errorsas.go
@@ -12,7 +12,7 @@ import (
 "go/types"
 "golang.org/x/tools/go/analysis"
- typeindexanalyzer "golang.org/x/tools/internal/analysisinternal/typeindex"
+ typeindexanalyzer "golang.org/x/tools/internal/analysis/typeindex"
 "golang.org/x/tools/internal/typesinternal/typeindex"
)
diff --git a/vendor/golang.org/x/tools/go/analysis/passes/fieldalignment/fieldalignment.go b/vendor/golang.org/x/tools/go/analysis/passes/fieldalignment/fieldalignment.go
index 4987ec5afd..235fa4f01f 100644
--- a/vendor/golang.org/x/tools/go/analysis/passes/fieldalignment/fieldalignment.go
+++ b/vendor/golang.org/x/tools/go/analysis/passes/fieldalignment/fieldalignment.go
@@ -18,6 +18,7 @@ import (
 "golang.org/x/tools/go/analysis"
 "golang.org/x/tools/go/analysis/passes/inspect"
 "golang.org/x/tools/go/ast/inspector"
+ "golang.org/x/tools/internal/astutil"
)
const Doc = `find structs that would use less memory if their fields were sorted
@@ -103,6 +104,11 @@ func fieldalignment(pass *analysis.Pass, node *ast.StructType, typ *types.Struct
 return
 }
+ // Analyzers borrow syntax trees; they do not own them and must not modify them.
+ // This Clone operation is a quick fix to the data race introduced
+ // in CL 278872 by the clearing of the Comment and Doc fields below.
+ node = astutil.CloneNode(node)
+
 // Flatten the ast node since it could have multiple field names per list item while
 // *types.Struct only have one item per field.
 // TODO: Preserve multi-named fields instead of flattening.
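The CloneNode call added to fieldalignment above exists because analyzers share syntax trees and must not mutate them, yet this pass clears each field's Doc and Comment before reprinting the struct. Below is a minimal sketch of the defensive-copy idea, using a hypothetical hand-rolled cloneStruct helper in place of the internal astutil.CloneNode:

    package main

    import (
        "fmt"
        "go/ast"
        "go/parser"
        "go/token"
    )

    // cloneStruct shallow-copies a struct type and its field list so that
    // the copies' Doc/Comment fields can be cleared without touching the
    // original, shared tree. (Hypothetical stand-in for CloneNode.)
    func cloneStruct(n *ast.StructType) *ast.StructType {
        cp := *n
        fields := *n.Fields
        cp.Fields = &fields
        cp.Fields.List = make([]*ast.Field, len(n.Fields.List))
        for i, f := range n.Fields.List {
            fcp := *f
            fcp.Doc, fcp.Comment = nil, nil // safe: we own fcp, not *f
            cp.Fields.List[i] = &fcp
        }
        return &cp
    }

    func main() {
        src := "package p\ntype T struct {\n\tA bool // kept\n\tB int64\n}\n"
        fset := token.NewFileSet()
        f, _ := parser.ParseFile(fset, "p.go", src, parser.ParseComments)
        ast.Inspect(f, func(n ast.Node) bool {
            if st, ok := n.(*ast.StructType); ok {
                _ = cloneStruct(st)
                // The shared tree still carries its field comment.
                fmt.Println(st.Fields.List[0].Comment != nil) // true
            }
            return true
        })
    }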
diff --git a/vendor/golang.org/x/tools/go/analysis/passes/framepointer/framepointer.go b/vendor/golang.org/x/tools/go/analysis/passes/framepointer/framepointer.go index ff9c8b4f81..a7d558103a 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/framepointer/framepointer.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/framepointer/framepointer.go @@ -13,7 +13,7 @@ import ( "unicode" "golang.org/x/tools/go/analysis" - "golang.org/x/tools/go/analysis/passes/internal/analysisutil" + "golang.org/x/tools/internal/analysis/analyzerutil" ) const Doc = "report assembly that clobbers the frame pointer before saving it" @@ -98,7 +98,7 @@ func run(pass *analysis.Pass) (any, error) { } for _, fname := range sfiles { - content, tf, err := analysisutil.ReadFile(pass, fname) + content, tf, err := analyzerutil.ReadFile(pass, fname) if err != nil { return nil, err } @@ -127,7 +127,7 @@ func run(pass *analysis.Pass) (any, error) { } if arch.isFPWrite(line) { - pass.Reportf(analysisutil.LineStart(tf, lineno), "frame pointer is clobbered before saving") + pass.Reportf(tf.LineStart(lineno), "frame pointer is clobbered before saving") active = false continue } diff --git a/vendor/golang.org/x/tools/go/analysis/passes/hostport/hostport.go b/vendor/golang.org/x/tools/go/analysis/passes/hostport/hostport.go index 07f154963e..d41a0e4cbf 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/hostport/hostport.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/hostport/hostport.go @@ -17,7 +17,7 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" "golang.org/x/tools/go/types/typeutil" - typeindexanalyzer "golang.org/x/tools/internal/analysisinternal/typeindex" + typeindexanalyzer "golang.org/x/tools/internal/analysis/typeindex" "golang.org/x/tools/internal/typesinternal/typeindex" ) diff --git a/vendor/golang.org/x/tools/go/analysis/passes/httpmux/httpmux.go b/vendor/golang.org/x/tools/go/analysis/passes/httpmux/httpmux.go index a4f00e2c56..f27104975b 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/httpmux/httpmux.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/httpmux/httpmux.go @@ -84,7 +84,7 @@ func isServeMuxRegisterCall(pass *analysis.Pass, call *ast.CallExpr) bool { if !isMethodNamed(fn, "net/http", "Handle", "HandleFunc") { return false } - recv := fn.Type().(*types.Signature).Recv() // isMethodNamed() -> non-nil + recv := fn.Signature().Recv() // isMethodNamed() -> non-nil isPtr, named := typesinternal.ReceiverNamed(recv) return isPtr && typesinternal.IsTypeNamed(named, "net/http", "ServeMux") } @@ -92,7 +92,7 @@ func isServeMuxRegisterCall(pass *analysis.Pass, call *ast.CallExpr) bool { // isMethodNamed reports when a function f is a method, // in a package with the path pkgPath and the name of f is in names. // -// (Unlike [analysisinternal.IsMethodNamed], it ignores the receiver type name.) +// (Unlike [analysis.IsMethodNamed], it ignores the receiver type name.) 
func isMethodNamed(f *types.Func, pkgPath string, names ...string) bool { if f == nil { return false @@ -100,7 +100,7 @@ func isMethodNamed(f *types.Func, pkgPath string, names ...string) bool { if f.Pkg() == nil || f.Pkg().Path() != pkgPath { return false // not at pkgPath } - if f.Type().(*types.Signature).Recv() == nil { + if f.Signature().Recv() == nil { return false // not a method } return slices.Contains(names, f.Name()) diff --git a/vendor/golang.org/x/tools/go/analysis/passes/ifaceassert/ifaceassert.go b/vendor/golang.org/x/tools/go/analysis/passes/ifaceassert/ifaceassert.go index 4022dbe7c2..da0acbd8e2 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/ifaceassert/ifaceassert.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/ifaceassert/ifaceassert.go @@ -11,8 +11,8 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" - "golang.org/x/tools/go/analysis/passes/internal/analysisutil" "golang.org/x/tools/go/ast/inspector" + "golang.org/x/tools/internal/analysis/analyzerutil" "golang.org/x/tools/internal/typeparams" ) @@ -21,7 +21,7 @@ var doc string var Analyzer = &analysis.Analyzer{ Name: "ifaceassert", - Doc: analysisutil.MustExtractDoc(doc, "ifaceassert"), + Doc: analyzerutil.MustExtractDoc(doc, "ifaceassert"), URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/ifaceassert", Requires: []*analysis.Analyzer{inspect.Analyzer}, Run: run, diff --git a/vendor/golang.org/x/tools/go/analysis/passes/inspect/inspect.go b/vendor/golang.org/x/tools/go/analysis/passes/inspect/inspect.go index ee1972f56d..aae5d255f9 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/inspect/inspect.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/inspect/inspect.go @@ -41,7 +41,7 @@ var Analyzer = &analysis.Analyzer{ URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/inspect", Run: run, RunDespiteErrors: true, - ResultType: reflect.TypeOf(new(inspector.Inspector)), + ResultType: reflect.TypeFor[*inspector.Inspector](), } func run(pass *analysis.Pass) (any, error) { diff --git a/vendor/golang.org/x/tools/go/analysis/passes/internal/analysisutil/util.go b/vendor/golang.org/x/tools/go/analysis/passes/internal/analysisutil/util.go deleted file mode 100644 index d3df898d30..0000000000 --- a/vendor/golang.org/x/tools/go/analysis/passes/internal/analysisutil/util.go +++ /dev/null @@ -1,99 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package analysisutil defines various helper functions -// used by two or more packages beneath go/analysis. -package analysisutil - -import ( - "go/ast" - "go/token" - "go/types" - "os" - - "golang.org/x/tools/go/analysis" - "golang.org/x/tools/internal/analysisinternal" -) - -// HasSideEffects reports whether evaluation of e has side effects. -func HasSideEffects(info *types.Info, e ast.Expr) bool { - safe := true - ast.Inspect(e, func(node ast.Node) bool { - switch n := node.(type) { - case *ast.CallExpr: - typVal := info.Types[n.Fun] - switch { - case typVal.IsType(): - // Type conversion, which is safe. - case typVal.IsBuiltin(): - // Builtin func, conservatively assumed to not - // be safe for now. - safe = false - return false - default: - // A non-builtin func or method call. - // Conservatively assume that all of them have - // side effects for now. 
- safe = false - return false - } - case *ast.UnaryExpr: - if n.Op == token.ARROW { - safe = false - return false - } - } - return true - }) - return !safe -} - -// ReadFile reads a file and adds it to the FileSet -// so that we can report errors against it using lineStart. -func ReadFile(pass *analysis.Pass, filename string) ([]byte, *token.File, error) { - readFile := pass.ReadFile - if readFile == nil { - readFile = os.ReadFile - } - content, err := readFile(filename) - if err != nil { - return nil, nil, err - } - tf := pass.Fset.AddFile(filename, -1, len(content)) - tf.SetLinesForContent(content) - return content, tf, nil -} - -// LineStart returns the position of the start of the specified line -// within file f, or NoPos if there is no line of that number. -func LineStart(f *token.File, line int) token.Pos { - // Use binary search to find the start offset of this line. - // - // TODO(adonovan): eventually replace this function with the - // simpler and more efficient (*go/token.File).LineStart, added - // in go1.12. - - min := 0 // inclusive - max := f.Size() // exclusive - for { - offset := (min + max) / 2 - pos := f.Pos(offset) - posn := f.Position(pos) - if posn.Line == line { - return pos - (token.Pos(posn.Column) - 1) - } - - if min+1 >= max { - return token.NoPos - } - - if posn.Line < line { - min = offset - } else { - max = offset - } - } -} - -var MustExtractDoc = analysisinternal.MustExtractDoc diff --git a/vendor/golang.org/x/tools/go/analysis/passes/internal/ctrlflowinternal/ctrlflowinternal.go b/vendor/golang.org/x/tools/go/analysis/passes/internal/ctrlflowinternal/ctrlflowinternal.go new file mode 100644 index 0000000000..ee7a37228e --- /dev/null +++ b/vendor/golang.org/x/tools/go/analysis/passes/internal/ctrlflowinternal/ctrlflowinternal.go @@ -0,0 +1,17 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package ctrlflowinternal exposes internals of ctrlflow. +// It cannot actually depend on symbols from ctrlflow. +package ctrlflowinternal + +import "go/types" + +// NoReturn exposes the (*ctrlflow.CFGs).NoReturn method to the buildssa analyzer. +// +// You must link [golang.org/x/tools/go/analysis/passes/ctrlflow] into your +// application for it to be non-nil. 
+var NoReturn = func(cfgs any, fn *types.Func) bool {
+ panic("x/tools/go/analysis/passes/ctrlflow is not linked into this application")
+}
diff --git a/vendor/golang.org/x/tools/go/analysis/passes/loopclosure/loopclosure.go b/vendor/golang.org/x/tools/go/analysis/passes/loopclosure/loopclosure.go
index 8432e963c6..41b19d7933 100644
--- a/vendor/golang.org/x/tools/go/analysis/passes/loopclosure/loopclosure.go
+++ b/vendor/golang.org/x/tools/go/analysis/passes/loopclosure/loopclosure.go
@@ -11,9 +11,9 @@ import (
 "golang.org/x/tools/go/analysis"
 "golang.org/x/tools/go/analysis/passes/inspect"
- "golang.org/x/tools/go/analysis/passes/internal/analysisutil"
 "golang.org/x/tools/go/ast/inspector"
 "golang.org/x/tools/go/types/typeutil"
+ "golang.org/x/tools/internal/analysis/analyzerutil"
 "golang.org/x/tools/internal/typesinternal"
 "golang.org/x/tools/internal/versions"
)
@@ -23,7 +23,7 @@ var doc string
var Analyzer = &analysis.Analyzer{
 Name: "loopclosure",
- Doc: analysisutil.MustExtractDoc(doc, "loopclosure"),
+ Doc: analyzerutil.MustExtractDoc(doc, "loopclosure"),
 URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/loopclosure",
 Requires: []*analysis.Analyzer{inspect.Analyzer},
 Run: run,
@@ -55,8 +55,8 @@ func run(pass *analysis.Pass) (any, error) {
 switch n := n.(type) {
 case *ast.File:
 // Only traverse the file if its goversion is strictly before go1.22.
- goversion := versions.FileVersion(pass.TypesInfo, n)
- return versions.Before(goversion, versions.Go1_22)
+ return !analyzerutil.FileUsesGoVersion(pass, n, versions.Go1_22)
+
 case *ast.RangeStmt:
 body = n.Body
 addVar(n.Key)
@@ -308,12 +308,11 @@ func parallelSubtest(info *types.Info, call *ast.CallExpr) []ast.Stmt {
 if !ok {
 continue
 }
- expr := exprStmt.X
- if isMethodCall(info, expr, "testing", "T", "Parallel") {
- call, _ := expr.(*ast.CallExpr)
- if call == nil {
- continue
- }
+ call, ok := exprStmt.X.(*ast.CallExpr)
+ if !ok {
+ continue
+ }
+ if isMethodCall(info, call, "testing", "T", "Parallel") {
 x, _ := call.Fun.(*ast.SelectorExpr)
 if x == nil {
 continue
@@ -347,26 +346,6 @@ func unlabel(stmt ast.Stmt) (ast.Stmt, bool) {
 }
}
-// isMethodCall reports whether expr is a method call of
-// <pkgPath>.<typeName>.<method>.
-func isMethodCall(info *types.Info, expr ast.Expr, pkgPath, typeName, method string) bool {
- call, ok := expr.(*ast.CallExpr)
- if !ok {
- return false
- }
-
- // Check that we are calling a method <method>
- f := typeutil.StaticCallee(info, call)
- if f == nil || f.Name() != method {
- return false
- }
- recv := f.Type().(*types.Signature).Recv()
- if recv == nil {
- return false
- }
-
- // Check that the receiver is a <pkgPath>.<typeName> or
- // *<pkgPath>.<typeName>.
- _, named := typesinternal.ReceiverNamed(recv) - return typesinternal.IsTypeNamed(named, pkgPath, typeName) +func isMethodCall(info *types.Info, call *ast.CallExpr, pkgPath, typeName, method string) bool { + return typesinternal.IsMethodNamed(typeutil.Callee(info, call), pkgPath, typeName, method) } diff --git a/vendor/golang.org/x/tools/go/analysis/passes/lostcancel/lostcancel.go b/vendor/golang.org/x/tools/go/analysis/passes/lostcancel/lostcancel.go index cc0bf0fd31..28a5f6cd93 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/lostcancel/lostcancel.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/lostcancel/lostcancel.go @@ -13,9 +13,9 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/ctrlflow" "golang.org/x/tools/go/analysis/passes/inspect" - "golang.org/x/tools/go/analysis/passes/internal/analysisutil" "golang.org/x/tools/go/ast/inspector" "golang.org/x/tools/go/cfg" + "golang.org/x/tools/internal/analysis/analyzerutil" "golang.org/x/tools/internal/astutil" "golang.org/x/tools/internal/typesinternal" ) @@ -25,7 +25,7 @@ var doc string var Analyzer = &analysis.Analyzer{ Name: "lostcancel", - Doc: analysisutil.MustExtractDoc(doc, "lostcancel"), + Doc: analyzerutil.MustExtractDoc(doc, "lostcancel"), URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/lostcancel", Run: run, Requires: []*analysis.Analyzer{ @@ -316,8 +316,8 @@ outer: } func tupleContains(tuple *types.Tuple, v *types.Var) bool { - for i := 0; i < tuple.Len(); i++ { - if tuple.At(i) == v { + for v0 := range tuple.Variables() { + if v0 == v { return true } } diff --git a/vendor/golang.org/x/tools/go/analysis/passes/modernize/any.go b/vendor/golang.org/x/tools/go/analysis/passes/modernize/any.go index 05999f8f2b..579ab865da 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/modernize/any.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/modernize/any.go @@ -9,29 +9,21 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" - "golang.org/x/tools/go/ast/inspector" - "golang.org/x/tools/internal/analysisinternal" - "golang.org/x/tools/internal/analysisinternal/generated" + "golang.org/x/tools/internal/analysis/analyzerutil" + "golang.org/x/tools/internal/versions" ) var AnyAnalyzer = &analysis.Analyzer{ - Name: "any", - Doc: analysisinternal.MustExtractDoc(doc, "any"), - Requires: []*analysis.Analyzer{ - generated.Analyzer, - inspect.Analyzer, - }, - Run: runAny, - URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/modernize#any", + Name: "any", + Doc: analyzerutil.MustExtractDoc(doc, "any"), + Requires: []*analysis.Analyzer{inspect.Analyzer}, + Run: runAny, + URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/modernize#any", } // The any pass replaces interface{} with go1.18's 'any'. 
func runAny(pass *analysis.Pass) (any, error) { - skipGenerated(pass) - - inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) - - for curFile := range filesUsing(inspect, pass.TypesInfo, "go1.18") { + for curFile := range filesUsingGoVersion(pass, versions.Go1_18) { for curIface := range curFile.Preorder((*ast.InterfaceType)(nil)) { iface := curIface.Node().(*ast.InterfaceType) diff --git a/vendor/golang.org/x/tools/go/analysis/passes/modernize/bloop.go b/vendor/golang.org/x/tools/go/analysis/passes/modernize/bloop.go index eb1ac170c6..ad45d74478 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/modernize/bloop.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/modernize/bloop.go @@ -15,20 +15,19 @@ import ( "golang.org/x/tools/go/analysis/passes/inspect" "golang.org/x/tools/go/ast/inspector" "golang.org/x/tools/go/types/typeutil" - "golang.org/x/tools/internal/analysisinternal" - "golang.org/x/tools/internal/analysisinternal/generated" - typeindexanalyzer "golang.org/x/tools/internal/analysisinternal/typeindex" + "golang.org/x/tools/internal/analysis/analyzerutil" + typeindexanalyzer "golang.org/x/tools/internal/analysis/typeindex" "golang.org/x/tools/internal/astutil" "golang.org/x/tools/internal/moreiters" "golang.org/x/tools/internal/typesinternal" "golang.org/x/tools/internal/typesinternal/typeindex" + "golang.org/x/tools/internal/versions" ) var BLoopAnalyzer = &analysis.Analyzer{ Name: "bloop", - Doc: analysisinternal.MustExtractDoc(doc, "bloop"), + Doc: analyzerutil.MustExtractDoc(doc, "bloop"), Requires: []*analysis.Analyzer{ - generated.Analyzer, inspect.Analyzer, typeindexanalyzer.Analyzer, }, @@ -45,16 +44,13 @@ var BLoopAnalyzer = &analysis.Analyzer{ // for i := 0; i < b.N; i++ {} => for b.Loop() {} // for range b.N {} func bloop(pass *analysis.Pass) (any, error) { - skipGenerated(pass) - if !typesinternal.Imports(pass.Pkg, "testing") { return nil, nil } var ( - inspect = pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) - index = pass.ResultOf[typeindexanalyzer.Analyzer].(*typeindex.Index) - info = pass.TypesInfo + index = pass.ResultOf[typeindexanalyzer.Analyzer].(*typeindex.Index) + info = pass.TypesInfo ) // edits computes the text edits for a matched for/range loop @@ -102,7 +98,7 @@ func bloop(pass *analysis.Pass) (any, error) { (*ast.ForStmt)(nil), (*ast.RangeStmt)(nil), } - for curFile := range filesUsing(inspect, info, "go1.24") { + for curFile := range filesUsingGoVersion(pass, versions.Go1_24) { for curLoop := range curFile.Preorder(loops...) { switch n := curLoop.Node().(type) { case *ast.ForStmt: @@ -189,6 +185,7 @@ func enclosingFunc(c inspector.Cursor) (inspector.Cursor, bool) { // 2. 
The only b.N loop in that benchmark function // - b.Loop() can only be called once per benchmark execution // - Multiple calls result in "B.Loop called with timer stopped" error +// - Multiple loops may have complex interdependencies that are hard to analyze func usesBenchmarkNOnce(c inspector.Cursor, info *types.Info) bool { // Find the enclosing benchmark function curFunc, ok := enclosingFunc(c) @@ -205,17 +202,14 @@ func usesBenchmarkNOnce(c inspector.Cursor, info *types.Info) bool { return false } - // Count b.N references in this benchmark function + // Count all b.N references in this benchmark function (including nested functions) bnRefCount := 0 - filter := []ast.Node{(*ast.SelectorExpr)(nil), (*ast.FuncLit)(nil)} + filter := []ast.Node{(*ast.SelectorExpr)(nil)} curFunc.Inspect(filter, func(cur inspector.Cursor) bool { - switch n := cur.Node().(type) { - case *ast.FuncLit: - return false // don't descend into nested function literals - case *ast.SelectorExpr: - if n.Sel.Name == "N" && typesinternal.IsPointerToNamed(info.TypeOf(n.X), "testing", "B") { - bnRefCount++ - } + sel := cur.Node().(*ast.SelectorExpr) + if sel.Sel.Name == "N" && + typesinternal.IsPointerToNamed(info.TypeOf(sel.X), "testing", "B") { + bnRefCount++ } return true }) @@ -240,7 +234,7 @@ func isIncrementLoop(info *types.Info, loop *ast.ForStmt) *types.Var { if assign, ok := loop.Init.(*ast.AssignStmt); ok && assign.Tok == token.DEFINE && len(assign.Rhs) == 1 && - isZeroIntLiteral(info, assign.Rhs[0]) && + isZeroIntConst(info, assign.Rhs[0]) && is[*ast.IncDecStmt](loop.Post) && loop.Post.(*ast.IncDecStmt).Tok == token.INC && astutil.EqualSyntax(loop.Post.(*ast.IncDecStmt).X, assign.Lhs[0]) { diff --git a/vendor/golang.org/x/tools/go/analysis/passes/modernize/doc.go b/vendor/golang.org/x/tools/go/analysis/passes/modernize/doc.go index 5d8f93c18c..7469002f56 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/modernize/doc.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/modernize/doc.go @@ -176,8 +176,12 @@ This analyzer finds declarations of functions of this form: and suggests a fix to turn them into inlinable wrappers around go1.26's built-in new(expr) function: + //go:fix inline func varOf(x int) *int { return new(x) } +(The directive comment causes the 'inline' analyzer to suggest +that calls to such functions are inlined.) + In addition, this analyzer suggests a fix for each call to one of the functions before it is transformed, so that @@ -187,9 +191,9 @@ is replaced by: use(new(123)) -(Wrapper functions such as varOf are common when working with Go +Wrapper functions such as varOf are common when working with Go serialization packages such as for JSON or protobuf, where pointers -are often used to express optionality.) +are often used to express optionality. # Analyzer omitzero @@ -205,6 +209,22 @@ Replacing `omitempty` with `omitzero` is a change in behavior. The original code would always encode the struct field, whereas the modified code will omit it if it is a zero-value. +# Analyzer plusbuild + +plusbuild: remove obsolete //+build comments + +The plusbuild analyzer suggests a fix to remove obsolete build tags +of the form: + + //+build linux,amd64 + +in files that also contain a Go 1.18-style tag such as: + + //go:build linux && amd64 + +(It does not check that the old and new tags are consistent; +that is the job of the 'buildtag' analyzer in the vet suite.) 
+ # Analyzer rangeint rangeint: replace 3-clause for loops with for-range over integers @@ -311,6 +331,44 @@ iterator offered by the same data type: where x is one of various well-known types in the standard library. +# Analyzer stringscut + +stringscut: replace strings.Index etc. with strings.Cut + +This analyzer replaces certain patterns of use of [strings.Index] and string slicing by [strings.Cut], added in go1.18. + +For example: + + idx := strings.Index(s, substr) + if idx >= 0 { + return s[:idx] + } + +is replaced by: + + before, _, ok := strings.Cut(s, substr) + if ok { + return before + } + +And: + + idx := strings.Index(s, substr) + if idx >= 0 { + return + } + +is replaced by: + + found := strings.Contains(s, substr) + if found { + return + } + +It also handles variants using [strings.IndexByte] instead of Index, or the bytes package instead of strings. + +Fixes are offered only in cases in which there are no potential modifications of the idx, s, or substr expressions between their definition and use. + # Analyzer stringscutprefix stringscutprefix: replace HasPrefix/TrimPrefix with CutPrefix diff --git a/vendor/golang.org/x/tools/go/analysis/passes/modernize/errorsastype.go b/vendor/golang.org/x/tools/go/analysis/passes/modernize/errorsastype.go index b6387ad840..d9a922f846 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/modernize/errorsastype.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/modernize/errorsastype.go @@ -14,21 +14,21 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/ast/edge" "golang.org/x/tools/go/ast/inspector" - "golang.org/x/tools/internal/analysisinternal" - "golang.org/x/tools/internal/analysisinternal/generated" - typeindexanalyzer "golang.org/x/tools/internal/analysisinternal/typeindex" + "golang.org/x/tools/internal/analysis/analyzerutil" + typeindexanalyzer "golang.org/x/tools/internal/analysis/typeindex" "golang.org/x/tools/internal/astutil" "golang.org/x/tools/internal/goplsexport" "golang.org/x/tools/internal/refactor" "golang.org/x/tools/internal/typesinternal" "golang.org/x/tools/internal/typesinternal/typeindex" + "golang.org/x/tools/internal/versions" ) var errorsastypeAnalyzer = &analysis.Analyzer{ Name: "errorsastype", - Doc: analysisinternal.MustExtractDoc(doc, "errorsastype"), + Doc: analyzerutil.MustExtractDoc(doc, "errorsastype"), URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/modernize#errorsastype", - Requires: []*analysis.Analyzer{generated.Analyzer, typeindexanalyzer.Analyzer}, + Requires: []*analysis.Analyzer{typeindexanalyzer.Analyzer}, Run: errorsastype, } @@ -78,8 +78,6 @@ func init() { // // - if errors.As(err, myerr) && othercond { ... } func errorsastype(pass *analysis.Pass) (any, error) { - skipGenerated(pass) - var ( index = pass.ResultOf[typeindexanalyzer.Analyzer].(*typeindex.Index) info = pass.TypesInfo @@ -97,7 +95,7 @@ func errorsastype(pass *analysis.Pass) (any, error) { } file := astutil.EnclosingFile(curDeclStmt) - if !fileUses(info, file, "go1.26") { + if !analyzerutil.FileUsesGoVersion(pass, file, versions.Go1_26) { continue // errors.AsType is too new } @@ -127,6 +125,8 @@ func errorsastype(pass *analysis.Pass) (any, error) { errtype := types.TypeString(v.Type(), qual) // Choose a name for the "ok" variable. + // TODO(adonovan): this pattern also appears in stditerators, + // and is wanted elsewhere; factor. 
okName := "ok" if okVar := lookup(info, curCall, "ok"); okVar != nil { // The name 'ok' is already declared, but diff --git a/vendor/golang.org/x/tools/go/analysis/passes/modernize/fmtappendf.go b/vendor/golang.org/x/tools/go/analysis/passes/modernize/fmtappendf.go index f2e5360542..389f703466 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/modernize/fmtappendf.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/modernize/fmtappendf.go @@ -13,18 +13,17 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" "golang.org/x/tools/go/ast/edge" - "golang.org/x/tools/internal/analysisinternal" - "golang.org/x/tools/internal/analysisinternal/generated" - typeindexanalyzer "golang.org/x/tools/internal/analysisinternal/typeindex" + "golang.org/x/tools/internal/analysis/analyzerutil" + typeindexanalyzer "golang.org/x/tools/internal/analysis/typeindex" "golang.org/x/tools/internal/astutil" "golang.org/x/tools/internal/typesinternal/typeindex" + "golang.org/x/tools/internal/versions" ) var FmtAppendfAnalyzer = &analysis.Analyzer{ Name: "fmtappendf", - Doc: analysisinternal.MustExtractDoc(doc, "fmtappendf"), + Doc: analyzerutil.MustExtractDoc(doc, "fmtappendf"), Requires: []*analysis.Analyzer{ - generated.Analyzer, inspect.Analyzer, typeindexanalyzer.Analyzer, }, @@ -35,8 +34,6 @@ var FmtAppendfAnalyzer = &analysis.Analyzer{ // The fmtappend function replaces []byte(fmt.Sprintf(...)) by // fmt.Appendf(nil, ...), and similarly for Sprint, Sprintln. func fmtappendf(pass *analysis.Pass) (any, error) { - skipGenerated(pass) - index := pass.ResultOf[typeindexanalyzer.Analyzer].(*typeindex.Index) for _, fn := range []types.Object{ index.Object("fmt", "Sprintf"), @@ -50,7 +47,7 @@ func fmtappendf(pass *analysis.Pass) (any, error) { conv := curCall.Parent().Node().(*ast.CallExpr) tv := pass.TypesInfo.Types[conv.Fun] if tv.IsType() && types.Identical(tv.Type, byteSliceType) && - fileUses(pass.TypesInfo, astutil.EnclosingFile(curCall), "go1.19") { + analyzerutil.FileUsesGoVersion(pass, astutil.EnclosingFile(curCall), versions.Go1_19) { // Have: []byte(fmt.SprintX(...)) // Find "Sprint" identifier. 
diff --git a/vendor/golang.org/x/tools/go/analysis/passes/modernize/forvar.go b/vendor/golang.org/x/tools/go/analysis/passes/modernize/forvar.go
index 76e3a8a73c..67f60acaaf 100644
--- a/vendor/golang.org/x/tools/go/analysis/passes/modernize/forvar.go
+++ b/vendor/golang.org/x/tools/go/analysis/passes/modernize/forvar.go
@@ -10,22 +10,18 @@ import (
 "golang.org/x/tools/go/analysis"
 "golang.org/x/tools/go/analysis/passes/inspect"
- "golang.org/x/tools/go/ast/inspector"
- "golang.org/x/tools/internal/analysisinternal"
- "golang.org/x/tools/internal/analysisinternal/generated"
+ "golang.org/x/tools/internal/analysis/analyzerutil"
 "golang.org/x/tools/internal/astutil"
 "golang.org/x/tools/internal/refactor"
+ "golang.org/x/tools/internal/versions"
)
var ForVarAnalyzer = &analysis.Analyzer{
- Name: "forvar",
- Doc: analysisinternal.MustExtractDoc(doc, "forvar"),
- Requires: []*analysis.Analyzer{
- generated.Analyzer,
- inspect.Analyzer,
- },
- Run: forvar,
- URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/modernize#forvar",
+ Name: "forvar",
+ Doc: analyzerutil.MustExtractDoc(doc, "forvar"),
+ Requires: []*analysis.Analyzer{inspect.Analyzer},
+ Run: forvar,
+ URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/modernize#forvar",
}
// forvar offers to fix unnecessary copying of a for variable
@@ -39,54 +35,77 @@ var ForVarAnalyzer = &analysis.Analyzer{
// where the two idents are the same,
// and the ident is defined (:=) as a variable in the for statement.
// (Note that this 'fix' does not work for three clause loops
-// because the Go specification says "The variable used by each subsequent iteration
// is declared implicitly before executing the post statement and initialized to the
// value of the previous iteration's variable at that moment.")
+//
+// Variant: same thing in an IfStmt.Init, when the IfStmt is the sole
+// loop body statement:
+//
+// for _, x := range foo {
+// if x := x; cond { ... }
+// }
+//
+// (The restriction is necessary to avoid potential problems arising
+// from merging two distinct variables.)
+//
+// This analyzer is synergistic with stditerators,
+// which may create redundant "x := x" statements.
func forvar(pass *analysis.Pass) (any, error) {
- skipGenerated(pass)
-
- inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector)
- for curFile := range filesUsing(inspect, pass.TypesInfo, "go1.22") {
+ for curFile := range filesUsingGoVersion(pass, versions.Go1_22) {
 for curLoop := range curFile.Preorder((*ast.RangeStmt)(nil)) {
 loop := curLoop.Node().(*ast.RangeStmt)
 if loop.Tok != token.DEFINE {
 continue
 }
- isLoopVarRedecl := func(assign *ast.AssignStmt) bool {
- for i, lhs := range assign.Lhs {
- if !(astutil.EqualSyntax(lhs, assign.Rhs[i]) &&
- (astutil.EqualSyntax(lhs, loop.Key) || astutil.EqualSyntax(lhs, loop.Value))) {
- return false
+ isLoopVarRedecl := func(stmt ast.Stmt) bool {
+ if assign, ok := stmt.(*ast.AssignStmt); ok &&
+ assign.Tok == token.DEFINE &&
+ len(assign.Lhs) == len(assign.Rhs) {
+
+ for i, lhs := range assign.Lhs {
+ if !(astutil.EqualSyntax(lhs, assign.Rhs[i]) &&
+ (astutil.EqualSyntax(lhs, loop.Key) ||
+ astutil.EqualSyntax(lhs, loop.Value))) {
+ return false
+ }
 }
+ return true
 }
- return true
+ return false
 }
 // Have: for k, v := range x { stmts }
 //
 // Delete the prefix of stmts that are
 // of the form k := k; v := v; k, v := k, v; v, k := v, k.
for _, stmt := range loop.Body.List { - if assign, ok := stmt.(*ast.AssignStmt); ok && - assign.Tok == token.DEFINE && - len(assign.Lhs) == len(assign.Rhs) && - isLoopVarRedecl(assign) { - - curStmt, _ := curLoop.FindNode(stmt) - edits := refactor.DeleteStmt(pass.Fset.File(stmt.Pos()), curStmt) - if len(edits) > 0 { - pass.Report(analysis.Diagnostic{ - Pos: stmt.Pos(), - End: stmt.End(), - Message: "copying variable is unneeded", - SuggestedFixes: []analysis.SuggestedFix{{ - Message: "Remove unneeded redeclaration", - TextEdits: edits, - }}, - }) - } + if isLoopVarRedecl(stmt) { + // { x := x; ... } + // ------ + } else if ifstmt, ok := stmt.(*ast.IfStmt); ok && + ifstmt.Init != nil && + len(loop.Body.List) == 1 && // must be sole statement in loop body + isLoopVarRedecl(ifstmt.Init) { + // if x := x; cond { + // ------ + stmt = ifstmt.Init } else { break // stop at first other statement } + + curStmt, _ := curLoop.FindNode(stmt) + edits := refactor.DeleteStmt(pass.Fset.File(stmt.Pos()), curStmt) + if len(edits) > 0 { + pass.Report(analysis.Diagnostic{ + Pos: stmt.Pos(), + End: stmt.End(), + Message: "copying variable is unneeded", + SuggestedFixes: []analysis.SuggestedFix{{ + Message: "Remove unneeded redeclaration", + TextEdits: edits, + }}, + }) + } } } } diff --git a/vendor/golang.org/x/tools/go/analysis/passes/modernize/maps.go b/vendor/golang.org/x/tools/go/analysis/passes/modernize/maps.go index 3072cf6f51..2352c8b608 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/modernize/maps.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/modernize/maps.go @@ -15,23 +15,20 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" "golang.org/x/tools/go/ast/inspector" - "golang.org/x/tools/internal/analysisinternal" - "golang.org/x/tools/internal/analysisinternal/generated" + "golang.org/x/tools/internal/analysis/analyzerutil" "golang.org/x/tools/internal/astutil" "golang.org/x/tools/internal/refactor" "golang.org/x/tools/internal/typeparams" "golang.org/x/tools/internal/typesinternal" + "golang.org/x/tools/internal/versions" ) var MapsLoopAnalyzer = &analysis.Analyzer{ - Name: "mapsloop", - Doc: analysisinternal.MustExtractDoc(doc, "mapsloop"), - Requires: []*analysis.Analyzer{ - generated.Analyzer, - inspect.Analyzer, - }, - Run: mapsloop, - URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/modernize#mapsloop", + Name: "mapsloop", + Doc: analyzerutil.MustExtractDoc(doc, "mapsloop"), + Requires: []*analysis.Analyzer{inspect.Analyzer}, + Run: mapsloop, + URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/modernize#mapsloop", } // The mapsloop pass offers to simplify a loop of map insertions: @@ -55,8 +52,6 @@ var MapsLoopAnalyzer = &analysis.Analyzer{ // m = make(M) // m = M{} func mapsloop(pass *analysis.Pass) (any, error) { - skipGenerated(pass) - // Skip the analyzer in packages where its // fixes would create an import cycle. if within(pass, "maps", "bytes", "runtime") { @@ -223,8 +218,7 @@ func mapsloop(pass *analysis.Pass) (any, error) { } // Find all range loops around m[k] = v. 
- inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) - for curFile := range filesUsing(inspect, pass.TypesInfo, "go1.23") { + for curFile := range filesUsingGoVersion(pass, versions.Go1_23) { file := curFile.Node().(*ast.File) for curRange := range curFile.Preorder((*ast.RangeStmt)(nil)) { diff --git a/vendor/golang.org/x/tools/go/analysis/passes/modernize/minmax.go b/vendor/golang.org/x/tools/go/analysis/passes/modernize/minmax.go index 7ebf837375..23a0977f21 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/modernize/minmax.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/modernize/minmax.go @@ -15,19 +15,18 @@ import ( "golang.org/x/tools/go/analysis/passes/inspect" "golang.org/x/tools/go/ast/edge" "golang.org/x/tools/go/ast/inspector" - "golang.org/x/tools/internal/analysisinternal" - "golang.org/x/tools/internal/analysisinternal/generated" - typeindexanalyzer "golang.org/x/tools/internal/analysisinternal/typeindex" + "golang.org/x/tools/internal/analysis/analyzerutil" + typeindexanalyzer "golang.org/x/tools/internal/analysis/typeindex" "golang.org/x/tools/internal/astutil" "golang.org/x/tools/internal/typeparams" "golang.org/x/tools/internal/typesinternal/typeindex" + "golang.org/x/tools/internal/versions" ) var MinMaxAnalyzer = &analysis.Analyzer{ Name: "minmax", - Doc: analysisinternal.MustExtractDoc(doc, "minmax"), + Doc: analyzerutil.MustExtractDoc(doc, "minmax"), Requires: []*analysis.Analyzer{ - generated.Analyzer, inspect.Analyzer, typeindexanalyzer.Analyzer, }, @@ -56,8 +55,6 @@ var MinMaxAnalyzer = &analysis.Analyzer{ // - "x := a" or "x = a" or "var x = a" in pattern 2 // - "x < b" or "a < b" in pattern 2 func minmax(pass *analysis.Pass) (any, error) { - skipGenerated(pass) - // Check for user-defined min/max functions that can be removed checkUserDefinedMinMax(pass) @@ -201,8 +198,7 @@ func minmax(pass *analysis.Pass) (any, error) { // Find all "if a < b { lhs = rhs }" statements. info := pass.TypesInfo - inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) - for curFile := range filesUsing(inspect, info, "go1.21") { + for curFile := range filesUsingGoVersion(pass, versions.Go1_21) { astFile := curFile.Node().(*ast.File) for curIfStmt := range curFile.Preorder((*ast.IfStmt)(nil)) { ifStmt := curIfStmt.Node().(*ast.IfStmt) diff --git a/vendor/golang.org/x/tools/go/analysis/passes/modernize/modernize.go b/vendor/golang.org/x/tools/go/analysis/passes/modernize/modernize.go index 59adee12da..013ce79d6c 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/modernize/modernize.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/modernize/modernize.go @@ -16,15 +16,15 @@ import ( "strings" "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/inspect" "golang.org/x/tools/go/ast/edge" "golang.org/x/tools/go/ast/inspector" - "golang.org/x/tools/internal/analysisinternal" - "golang.org/x/tools/internal/analysisinternal/generated" + "golang.org/x/tools/internal/analysis/analyzerutil" "golang.org/x/tools/internal/astutil" "golang.org/x/tools/internal/moreiters" + "golang.org/x/tools/internal/packagepath" "golang.org/x/tools/internal/stdlib" "golang.org/x/tools/internal/typesinternal" - "golang.org/x/tools/internal/versions" ) //go:embed doc.go @@ -41,12 +41,14 @@ var Suite = []*analysis.Analyzer{ MinMaxAnalyzer, NewExprAnalyzer, OmitZeroAnalyzer, + plusBuildAnalyzer, RangeIntAnalyzer, ReflectTypeForAnalyzer, SlicesContainsAnalyzer, // SlicesDeleteAnalyzer, // not nil-preserving! 
SlicesSortAnalyzer, stditeratorsAnalyzer, + stringscutAnalyzer, StringsCutPrefixAnalyzer, StringsSeqAnalyzer, StringsBuilderAnalyzer, @@ -56,18 +58,6 @@ var Suite = []*analysis.Analyzer{ // -- helpers -- -// skipGenerated decorates pass.Report to suppress diagnostics in generated files. -func skipGenerated(pass *analysis.Pass) { - report := pass.Report - pass.Report = func(diag analysis.Diagnostic) { - generated := pass.ResultOf[generated.Analyzer].(*generated.Result) - if generated.IsGenerated(diag.Pos) { - return // skip - } - report(diag) - } -} - // formatExprs formats a comma-separated list of expressions. func formatExprs(fset *token.FileSet, exprs []ast.Expr) string { var buf strings.Builder @@ -80,8 +70,8 @@ func formatExprs(fset *token.FileSet, exprs []ast.Expr) string { return buf.String() } -// isZeroIntLiteral reports whether e is an integer whose value is 0. -func isZeroIntLiteral(info *types.Info, e ast.Expr) bool { +// isZeroIntConst reports whether e is an integer whose value is 0. +func isZeroIntConst(info *types.Info, e ast.Expr) bool { return isIntLiteral(info, e, 0) } @@ -90,41 +80,33 @@ func isIntLiteral(info *types.Info, e ast.Expr, n int64) bool { return info.Types[e].Value == constant.MakeInt64(n) } -// filesUsing returns a cursor for each *ast.File in the inspector +// filesUsingGoVersion returns a cursor for each *ast.File in the inspector // that uses at least the specified version of Go (e.g. "go1.24"). // +// The pass's analyzer must require [inspect.Analyzer]. +// // TODO(adonovan): opt: eliminate this function, instead following the -// approach of [fmtappendf], which uses typeindex and [fileUses]. -// See "Tip" at [fileUses] for motivation. -func filesUsing(inspect *inspector.Inspector, info *types.Info, version string) iter.Seq[inspector.Cursor] { +// approach of [fmtappendf], which uses typeindex and +// [analyzerutil.FileUsesGoVersion]; see "Tip" documented at the +// latter function for motivation. +func filesUsingGoVersion(pass *analysis.Pass, version string) iter.Seq[inspector.Cursor] { + inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + return func(yield func(inspector.Cursor) bool) { for curFile := range inspect.Root().Children() { file := curFile.Node().(*ast.File) - if !versions.Before(info.FileVersions[file], version) && !yield(curFile) { + if analyzerutil.FileUsesGoVersion(pass, file, version) && !yield(curFile) { break } } } } -// fileUses reports whether the specified file uses at least the -// specified version of Go (e.g. "go1.24"). -// -// Tip: we recommend using this check "late", just before calling -// pass.Report, rather than "early" (when entering each ast.File, or -// each candidate node of interest, during the traversal), because the -// operation is not free, yet is not a highly selective filter: the -// fraction of files that pass most version checks is high and -// increases over time. -func fileUses(info *types.Info, file *ast.File, version string) bool { - return !versions.Before(info.FileVersions[file], version) -} - // within reports whether the current pass is analyzing one of the // specified standard packages or their dependencies. 
func within(pass *analysis.Pass, pkgs ...string) bool { path := pass.Pkg.Path() - return analysisinternal.IsStdPackage(path) && + return packagepath.IsStdPackage(path) && moreiters.Contains(stdlib.Dependencies(pkgs...), path) } diff --git a/vendor/golang.org/x/tools/go/analysis/passes/modernize/newexpr.go b/vendor/golang.org/x/tools/go/analysis/passes/modernize/newexpr.go index b8893244d5..6cb75f247c 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/modernize/newexpr.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/modernize/newexpr.go @@ -17,13 +17,14 @@ import ( "golang.org/x/tools/go/analysis/passes/inspect" "golang.org/x/tools/go/ast/inspector" "golang.org/x/tools/go/types/typeutil" - "golang.org/x/tools/internal/analysisinternal" + "golang.org/x/tools/internal/analysis/analyzerutil" "golang.org/x/tools/internal/astutil" + "golang.org/x/tools/internal/versions" ) var NewExprAnalyzer = &analysis.Analyzer{ Name: "newexpr", - Doc: analysisinternal.MustExtractDoc(doc, "newexpr"), + Doc: analyzerutil.MustExtractDoc(doc, "newexpr"), URL: "https://pkg.go.dev/golang.org/x/tools/gopls/internal/analysis/modernize#newexpr", Requires: []*analysis.Analyzer{inspect.Analyzer}, Run: run, @@ -60,7 +61,7 @@ func run(pass *analysis.Pass) (any, error) { // Check file version. file := astutil.EnclosingFile(curFuncDecl) - if !fileUses(info, file, "go1.26") { + if !analyzerutil.FileUsesGoVersion(pass, file, versions.Go1_26) { continue // new(expr) not available in this file } @@ -87,25 +88,18 @@ func run(pass *analysis.Pass) (any, error) { } } - // Disabled until we resolve https://go.dev/issue/75726 - // (Go version skew between caller and callee in inliner.) - // TODO(adonovan): fix and reenable. + // Add a //go:fix inline annotation, if not already present. // - // Also, restore these lines to our section of doc.go: - // //go:fix inline - // ... - // (The directive comment causes the inline analyzer to suggest - // that calls to such functions are inlined.) - if false { - // Add a //go:fix inline annotation, if not already present. - // TODO(adonovan): use ast.ParseDirective when go1.26 is assured. - if !strings.Contains(decl.Doc.Text(), "go:fix inline") { - edits = append(edits, analysis.TextEdit{ - Pos: decl.Pos(), - End: decl.Pos(), - NewText: []byte("//go:fix inline\n"), - }) - } + // The inliner will not inline a newer callee body into an + // older Go file; see https://go.dev/issue/75726. + // + // TODO(adonovan): use ast.ParseDirective when go1.26 is assured. + if !strings.Contains(decl.Doc.Text(), "go:fix inline") { + edits = append(edits, analysis.TextEdit{ + Pos: decl.Pos(), + End: decl.Pos(), + NewText: []byte("//go:fix inline\n"), + }) } if len(edits) > 0 { @@ -140,7 +134,7 @@ func run(pass *analysis.Pass) (any, error) { // Check file version. 
file := astutil.EnclosingFile(curCall) - if !fileUses(info, file, "go1.26") { + if !analyzerutil.FileUsesGoVersion(pass, file, versions.Go1_26) { continue // new(expr) not available in this file } diff --git a/vendor/golang.org/x/tools/go/analysis/passes/modernize/omitzero.go b/vendor/golang.org/x/tools/go/analysis/passes/modernize/omitzero.go index bd309cf9d5..4a05d64f42 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/modernize/omitzero.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/modernize/omitzero.go @@ -12,21 +12,17 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" - "golang.org/x/tools/go/ast/inspector" - "golang.org/x/tools/internal/analysisinternal" - "golang.org/x/tools/internal/analysisinternal/generated" + "golang.org/x/tools/internal/analysis/analyzerutil" "golang.org/x/tools/internal/astutil" + "golang.org/x/tools/internal/versions" ) var OmitZeroAnalyzer = &analysis.Analyzer{ - Name: "omitzero", - Doc: analysisinternal.MustExtractDoc(doc, "omitzero"), - Requires: []*analysis.Analyzer{ - generated.Analyzer, - inspect.Analyzer, - }, - Run: omitzero, - URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/modernize#omitzero", + Name: "omitzero", + Doc: analyzerutil.MustExtractDoc(doc, "omitzero"), + Requires: []*analysis.Analyzer{inspect.Analyzer}, + Run: omitzero, + URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/modernize#omitzero", } func checkOmitEmptyField(pass *analysis.Pass, info *types.Info, curField *ast.Field) { @@ -48,25 +44,20 @@ func checkOmitEmptyField(pass *analysis.Pass, info *types.Info, curField *ast.Fi // No omitempty in json tag return } - omitEmptyPos, omitEmptyEnd, err := astutil.RangeInStringLiteral(curField.Tag, match[2], match[3]) + omitEmpty, err := astutil.RangeInStringLiteral(curField.Tag, match[2], match[3]) if err != nil { return } - removePos, removeEnd := omitEmptyPos, omitEmptyEnd + var remove analysis.Range = omitEmpty jsonTag := reflect.StructTag(tagconv).Get("json") if jsonTag == ",omitempty" { // Remove the entire struct tag if json is the only package used if match[1]-match[0] == len(tagconv) { - removePos = curField.Tag.Pos() - removeEnd = curField.Tag.End() + remove = curField.Tag } else { // Remove the json tag if omitempty is the only field - removePos, err = astutil.PosInStringLiteral(curField.Tag, match[0]) - if err != nil { - return - } - removeEnd, err = astutil.PosInStringLiteral(curField.Tag, match[1]) + remove, err = astutil.RangeInStringLiteral(curField.Tag, match[0], match[1]) if err != nil { return } @@ -81,8 +72,8 @@ func checkOmitEmptyField(pass *analysis.Pass, info *types.Info, curField *ast.Fi Message: "Remove redundant omitempty tag", TextEdits: []analysis.TextEdit{ { - Pos: removePos, - End: removeEnd, + Pos: remove.Pos(), + End: remove.End(), }, }, }, @@ -90,8 +81,8 @@ func checkOmitEmptyField(pass *analysis.Pass, info *types.Info, curField *ast.Fi Message: "Replace omitempty with omitzero (behavior change)", TextEdits: []analysis.TextEdit{ { - Pos: omitEmptyPos, - End: omitEmptyEnd, + Pos: omitEmpty.Pos(), + End: omitEmpty.End(), NewText: []byte(",omitzero"), }, }, @@ -100,18 +91,14 @@ func checkOmitEmptyField(pass *analysis.Pass, info *types.Info, curField *ast.Fi } // The omitzero pass searches for instances of "omitempty" in a json field tag on a -// struct. Since "omitempty" does not have any effect when applied to a struct field, +// struct. 
Since "omitfilesUsingGoVersions not have any effect when applied to a struct field, // it suggests either deleting "omitempty" or replacing it with "omitzero", which // correctly excludes structs from a json encoding. func omitzero(pass *analysis.Pass) (any, error) { - skipGenerated(pass) - - inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) - info := pass.TypesInfo - for curFile := range filesUsing(inspect, info, "go1.24") { + for curFile := range filesUsingGoVersion(pass, versions.Go1_24) { for curStruct := range curFile.Preorder((*ast.StructType)(nil)) { for _, curField := range curStruct.Node().(*ast.StructType).Fields.List { - checkOmitEmptyField(pass, info, curField) + checkOmitEmptyField(pass, pass.TypesInfo, curField) } } } diff --git a/vendor/golang.org/x/tools/go/analysis/passes/modernize/plusbuild.go b/vendor/golang.org/x/tools/go/analysis/passes/modernize/plusbuild.go new file mode 100644 index 0000000000..57b502ab80 --- /dev/null +++ b/vendor/golang.org/x/tools/go/analysis/passes/modernize/plusbuild.go @@ -0,0 +1,84 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package modernize + +import ( + "go/ast" + "go/parser" + "strings" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/internal/analysis/analyzerutil" + "golang.org/x/tools/internal/goplsexport" + "golang.org/x/tools/internal/versions" +) + +var plusBuildAnalyzer = &analysis.Analyzer{ + Name: "plusbuild", + Doc: analyzerutil.MustExtractDoc(doc, "plusbuild"), + URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/modernize#plusbuild", + Run: plusbuild, +} + +func init() { + // Export to gopls until this is a published modernizer. + goplsexport.PlusBuildModernizer = plusBuildAnalyzer +} + +func plusbuild(pass *analysis.Pass) (any, error) { + check := func(f *ast.File) { + if !analyzerutil.FileUsesGoVersion(pass, f, versions.Go1_18) { + return + } + + // When gofmt sees a +build comment, it adds a + // preceding equivalent //go:build directive, so in + // formatted files we can assume that a +build line is + // part of a comment group that starts with a + // //go:build line and is followed by a blank line. + // + // While we cannot delete comments from an AST and + // expect consistent output in general, this specific + // case--deleting only some lines from a comment + // block--does format correctly. 
+ for _, g := range f.Comments { + sawGoBuild := false + for _, c := range g.List { + if sawGoBuild && strings.HasPrefix(c.Text, "// +build ") { + pass.Report(analysis.Diagnostic{ + Pos: c.Pos(), + End: c.End(), + Message: "+build line is no longer needed", + SuggestedFixes: []analysis.SuggestedFix{{ + Message: "Remove obsolete +build line", + TextEdits: []analysis.TextEdit{{ + Pos: c.Pos(), + End: c.End(), + }}, + }}, + }) + break + } + if strings.HasPrefix(c.Text, "//go:build ") { + sawGoBuild = true + } + } + } + } + + for _, f := range pass.Files { + check(f) + } + for _, name := range pass.IgnoredFiles { + if strings.HasSuffix(name, ".go") { + f, err := parser.ParseFile(pass.Fset, name, nil, parser.ParseComments|parser.SkipObjectResolution) + if err != nil { + continue // parse error: ignore + } + check(f) + } + } + return nil, nil +} diff --git a/vendor/golang.org/x/tools/go/analysis/passes/modernize/rangeint.go b/vendor/golang.org/x/tools/go/analysis/passes/modernize/rangeint.go index adc840f11d..6b1edf38b3 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/modernize/rangeint.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/modernize/rangeint.go @@ -15,19 +15,18 @@ import ( "golang.org/x/tools/go/ast/edge" "golang.org/x/tools/go/ast/inspector" "golang.org/x/tools/go/types/typeutil" - "golang.org/x/tools/internal/analysisinternal" - "golang.org/x/tools/internal/analysisinternal/generated" - typeindexanalyzer "golang.org/x/tools/internal/analysisinternal/typeindex" + "golang.org/x/tools/internal/analysis/analyzerutil" + typeindexanalyzer "golang.org/x/tools/internal/analysis/typeindex" "golang.org/x/tools/internal/astutil" "golang.org/x/tools/internal/typesinternal" "golang.org/x/tools/internal/typesinternal/typeindex" + "golang.org/x/tools/internal/versions" ) var RangeIntAnalyzer = &analysis.Analyzer{ Name: "rangeint", - Doc: analysisinternal.MustExtractDoc(doc, "rangeint"), + Doc: analyzerutil.MustExtractDoc(doc, "rangeint"), Requires: []*analysis.Analyzer{ - generated.Analyzer, inspect.Analyzer, typeindexanalyzer.Analyzer, }, @@ -66,21 +65,19 @@ var RangeIntAnalyzer = &analysis.Analyzer{ // - a constant; or // - len(s), where s has the above properties. func rangeint(pass *analysis.Pass) (any, error) { - skipGenerated(pass) + var ( + info = pass.TypesInfo + typeindex = pass.ResultOf[typeindexanalyzer.Analyzer].(*typeindex.Index) + ) - info := pass.TypesInfo - - inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) - typeindex := pass.ResultOf[typeindexanalyzer.Analyzer].(*typeindex.Index) - - for curFile := range filesUsing(inspect, info, "go1.22") { + for curFile := range filesUsingGoVersion(pass, versions.Go1_22) { nextLoop: for curLoop := range curFile.Preorder((*ast.ForStmt)(nil)) { loop := curLoop.Node().(*ast.ForStmt) if init, ok := loop.Init.(*ast.AssignStmt); ok && isSimpleAssign(init) && is[*ast.Ident](init.Lhs[0]) && - isZeroIntLiteral(info, init.Rhs[0]) { + isZeroIntConst(info, init.Rhs[0]) { // Have: for i = 0; ... 
(or i := 0) index := init.Lhs[0].(*ast.Ident) diff --git a/vendor/golang.org/x/tools/go/analysis/passes/modernize/reflect.go b/vendor/golang.org/x/tools/go/analysis/passes/modernize/reflect.go index 1a4e3eb267..0fc781813f 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/modernize/reflect.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/modernize/reflect.go @@ -14,10 +14,10 @@ import ( "golang.org/x/tools/go/analysis/passes/inspect" "golang.org/x/tools/go/ast/edge" "golang.org/x/tools/go/types/typeutil" - "golang.org/x/tools/internal/analysisinternal" - "golang.org/x/tools/internal/analysisinternal/generated" - typeindexanalyzer "golang.org/x/tools/internal/analysisinternal/typeindex" + "golang.org/x/tools/internal/analysis/analyzerutil" + typeindexanalyzer "golang.org/x/tools/internal/analysis/typeindex" "golang.org/x/tools/internal/astutil" + "golang.org/x/tools/internal/refactor" "golang.org/x/tools/internal/typesinternal" "golang.org/x/tools/internal/typesinternal/typeindex" "golang.org/x/tools/internal/versions" @@ -25,9 +25,8 @@ import ( var ReflectTypeForAnalyzer = &analysis.Analyzer{ Name: "reflecttypefor", - Doc: analysisinternal.MustExtractDoc(doc, "reflecttypefor"), + Doc: analyzerutil.MustExtractDoc(doc, "reflecttypefor"), Requires: []*analysis.Analyzer{ - generated.Analyzer, inspect.Analyzer, typeindexanalyzer.Analyzer, }, @@ -36,8 +35,6 @@ var ReflectTypeForAnalyzer = &analysis.Analyzer{ } func reflecttypefor(pass *analysis.Pass) (any, error) { - skipGenerated(pass) - var ( index = pass.ResultOf[typeindexanalyzer.Analyzer].(*typeindex.Index) info = pass.TypesInfo @@ -66,16 +63,15 @@ func reflecttypefor(pass *analysis.Pass) (any, error) { obj := typeutil.Callee(info, call2) if typesinternal.IsMethodNamed(obj, "reflect", "Type", "Elem") { if ptr, ok := t.(*types.Pointer); ok { - // Have: reflect.TypeOf(...*T value...).Elem() - // => reflect.TypeFor[T]() + // Have: TypeOf(expr).Elem() where expr : *T t = ptr.Elem() - edits = []analysis.TextEdit{ - { - // delete .Elem() - Pos: call.End(), - End: call2.End(), - }, - } + // reflect.TypeOf(expr).Elem() + // ------- + // reflect.TypeOf(expr) + edits = []analysis.TextEdit{{ + Pos: call.End(), + End: call2.End(), + }} } } } @@ -89,9 +85,10 @@ func reflecttypefor(pass *analysis.Pass) (any, error) { } file := astutil.EnclosingFile(curCall) - if versions.Before(info.FileVersions[file], "go1.22") { + if !analyzerutil.FileUsesGoVersion(pass, file, versions.Go1_22) { continue // TypeFor requires go1.22 } + tokFile := pass.Fset.File(file.Pos()) // Format the type as valid Go syntax. // TODO(adonovan): FileQualifier needs to respect @@ -105,6 +102,14 @@ func reflecttypefor(pass *analysis.Pass) (any, error) { continue // e.g. reflect was dot-imported } + // If the call argument contains the last use + // of a variable, as in: + // var zero T + // reflect.TypeOf(zero) + // remove the declaration of that variable. + curArg0 := curCall.ChildAt(edge.CallExpr_Args, 0) + edits = append(edits, refactor.DeleteUnusedVars(index, info, tokFile, curArg0)...) 
+ pass.Report(analysis.Diagnostic{ Pos: call.Fun.Pos(), End: call.Fun.End(), diff --git a/vendor/golang.org/x/tools/go/analysis/passes/modernize/slices.go b/vendor/golang.org/x/tools/go/analysis/passes/modernize/slices.go index 032f874df1..960a46644b 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/modernize/slices.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/modernize/slices.go @@ -13,25 +13,21 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" - "golang.org/x/tools/go/ast/inspector" "golang.org/x/tools/go/types/typeutil" - "golang.org/x/tools/internal/analysisinternal" - "golang.org/x/tools/internal/analysisinternal/generated" + "golang.org/x/tools/internal/analysis/analyzerutil" "golang.org/x/tools/internal/astutil" "golang.org/x/tools/internal/refactor" "golang.org/x/tools/internal/typesinternal" + "golang.org/x/tools/internal/versions" ) // Warning: this analyzer is not safe to enable by default. var AppendClippedAnalyzer = &analysis.Analyzer{ - Name: "appendclipped", - Doc: analysisinternal.MustExtractDoc(doc, "appendclipped"), - Requires: []*analysis.Analyzer{ - generated.Analyzer, - inspect.Analyzer, - }, - Run: appendclipped, - URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/modernize#appendclipped", + Name: "appendclipped", + Doc: analyzerutil.MustExtractDoc(doc, "appendclipped"), + Requires: []*analysis.Analyzer{inspect.Analyzer}, + Run: appendclipped, + URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/modernize#appendclipped", } // The appendclipped pass offers to simplify a tower of append calls: @@ -59,8 +55,6 @@ var AppendClippedAnalyzer = &analysis.Analyzer{ // The fix does not always preserve the nilness of the base slice when the // addends (a, b, c) are all empty (see #73557). func appendclipped(pass *analysis.Pass) (any, error) { - skipGenerated(pass) - // Skip the analyzer in packages where its // fixes would create an import cycle. if within(pass, "slices", "bytes", "runtime") { @@ -205,8 +199,7 @@ func appendclipped(pass *analysis.Pass) (any, error) { skip := make(map[*ast.CallExpr]bool) // Visit calls of form append(x, y...).
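
For reference, the kind of append tower this pass targets, with an illustrative replacement (a sketch only; the slices.Concat form follows the published modernize docs and needs go1.22, while the pass itself is gated on go1.21):

package main

import (
	"fmt"
	"slices"
)

func main() {
	x := []int{1, 2}
	a, b := []int{3}, []int{4, 5}

	// Tower of appends over a clipped base (x[:0:0] has zero capacity,
	// so every append is forced to copy):
	got := append(append(append(x[:0:0], x...), a...), b...)

	// Illustrative replacement:
	want := slices.Concat(x, a, b)

	fmt.Println(slices.Equal(got, want)) // true
}
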
- inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) - for curFile := range filesUsing(inspect, info, "go1.21") { + for curFile := range filesUsingGoVersion(pass, versions.Go1_21) { file := curFile.Node().(*ast.File) for curCall := range curFile.Preorder((*ast.CallExpr)(nil)) { @@ -266,7 +259,7 @@ func clippedSlice(info *types.Info, e ast.Expr) (res ast.Expr, empty bool) { // x[:0:0], x[:len(x):len(x)], x[:k:k] if e.Slice3 && e.High != nil && e.Max != nil && astutil.EqualSyntax(e.High, e.Max) { // x[:k:k] res = e - empty = isZeroIntLiteral(info, e.High) // x[:0:0] + empty = isZeroIntConst(info, e.High) // x[:0:0] if call, ok := e.High.(*ast.CallExpr); ok && typeutil.Callee(info, call) == builtinLen && astutil.EqualSyntax(call.Args[0], e.X) { diff --git a/vendor/golang.org/x/tools/go/analysis/passes/modernize/slicescontains.go b/vendor/golang.org/x/tools/go/analysis/passes/modernize/slicescontains.go index b3c2e562c9..3b32685266 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/modernize/slicescontains.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/modernize/slicescontains.go @@ -14,20 +14,19 @@ import ( "golang.org/x/tools/go/analysis/passes/inspect" "golang.org/x/tools/go/ast/inspector" "golang.org/x/tools/go/types/typeutil" - "golang.org/x/tools/internal/analysisinternal" - "golang.org/x/tools/internal/analysisinternal/generated" - typeindexanalyzer "golang.org/x/tools/internal/analysisinternal/typeindex" + "golang.org/x/tools/internal/analysis/analyzerutil" + typeindexanalyzer "golang.org/x/tools/internal/analysis/typeindex" "golang.org/x/tools/internal/astutil" "golang.org/x/tools/internal/refactor" "golang.org/x/tools/internal/typeparams" "golang.org/x/tools/internal/typesinternal/typeindex" + "golang.org/x/tools/internal/versions" ) var SlicesContainsAnalyzer = &analysis.Analyzer{ Name: "slicescontains", - Doc: analysisinternal.MustExtractDoc(doc, "slicescontains"), + Doc: analyzerutil.MustExtractDoc(doc, "slicescontains"), Requires: []*analysis.Analyzer{ - generated.Analyzer, inspect.Analyzer, typeindexanalyzer.Analyzer, }, @@ -66,8 +65,6 @@ var SlicesContainsAnalyzer = &analysis.Analyzer{ // TODO(adonovan): Add a check that needle/predicate expression from // if-statement has no effects. Now the program behavior may change. func slicescontains(pass *analysis.Pass) (any, error) { - skipGenerated(pass) - // Skip the analyzer in packages where its // fixes would create an import cycle. if within(pass, "slices", "runtime") { @@ -75,9 +72,8 @@ func slicescontains(pass *analysis.Pass) (any, error) { } var ( - inspect = pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) - index = pass.ResultOf[typeindexanalyzer.Analyzer].(*typeindex.Index) - info = pass.TypesInfo + index = pass.ResultOf[typeindexanalyzer.Analyzer].(*typeindex.Index) + info = pass.TypesInfo ) // check is called for each RangeStmt of this form: @@ -312,7 +308,7 @@ func slicescontains(pass *analysis.Pass) (any, error) { // Special case: // prev="lhs = false" body={ lhs = true; break } - // => lhs = slices.Contains(...) (or negation) + // => lhs = slices.Contains(...) 
(or its negation) if assign, ok := body.List[0].(*ast.AssignStmt); ok && len(body.List) == 2 && assign.Tok == token.ASSIGN && @@ -320,13 +316,12 @@ func slicescontains(pass *analysis.Pass) (any, error) { len(assign.Rhs) == 1 { // Have: body={ lhs = rhs; break } - if prevAssign, ok := prevStmt.(*ast.AssignStmt); ok && len(prevAssign.Lhs) == 1 && len(prevAssign.Rhs) == 1 && astutil.EqualSyntax(prevAssign.Lhs[0], assign.Lhs[0]) && - is[*ast.Ident](assign.Rhs[0]) && - info.Uses[assign.Rhs[0].(*ast.Ident)] == builtinTrue { + isTrueOrFalse(info, assign.Rhs[0]) == + -isTrueOrFalse(info, prevAssign.Rhs[0]) { // Have: // lhs = false @@ -336,15 +331,14 @@ func slicescontains(pass *analysis.Pass) (any, error) { // // TODO(adonovan): // - support "var lhs bool = false" and variants. - // - support negation. - // Both these variants seem quite significant. // - allow the break to be omitted. + neg := cond(isTrueOrFalse(info, assign.Rhs[0]) < 0, "!", "") report([]analysis.TextEdit{ - // Replace "rhs" of previous assignment by slices.Contains(...) + // Replace "rhs" of previous assignment by [!]slices.Contains(...) { Pos: prevAssign.Rhs[0].Pos(), End: prevAssign.Rhs[0].End(), - NewText: []byte(contains), + NewText: []byte(neg + contains), }, // Delete the loop and preceding space. { @@ -388,7 +382,7 @@ func slicescontains(pass *analysis.Pass) (any, error) { } } - for curFile := range filesUsing(inspect, info, "go1.21") { + for curFile := range filesUsingGoVersion(pass, versions.Go1_21) { file := curFile.Node().(*ast.File) for curRange := range curFile.Preorder((*ast.RangeStmt)(nil)) { @@ -420,13 +414,19 @@ func slicescontains(pass *analysis.Pass) (any, error) { // isReturnTrueOrFalse returns nonzero if stmt returns true (+1) or false (-1). func isReturnTrueOrFalse(info *types.Info, stmt ast.Stmt) int { if ret, ok := stmt.(*ast.ReturnStmt); ok && len(ret.Results) == 1 { - if id, ok := ret.Results[0].(*ast.Ident); ok { - switch info.Uses[id] { - case builtinTrue: - return +1 - case builtinFalse: - return -1 - } + return isTrueOrFalse(info, ret.Results[0]) + } + return 0 +} + +// isTrueOrFalse returns nonzero if expr is literally true (+1) or false (-1). +func isTrueOrFalse(info *types.Info, expr ast.Expr) int { + if id, ok := expr.(*ast.Ident); ok { + switch info.Uses[id] { + case builtinTrue: + return +1 + case builtinFalse: + return -1 } } return 0 diff --git a/vendor/golang.org/x/tools/go/analysis/passes/modernize/slicesdelete.go b/vendor/golang.org/x/tools/go/analysis/passes/modernize/slicesdelete.go index b3e063db0f..7b3aa875c0 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/modernize/slicesdelete.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/modernize/slicesdelete.go @@ -12,24 +12,20 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" - "golang.org/x/tools/go/ast/inspector" - "golang.org/x/tools/internal/analysisinternal" - "golang.org/x/tools/internal/analysisinternal/generated" + "golang.org/x/tools/internal/analysis/analyzerutil" "golang.org/x/tools/internal/astutil" "golang.org/x/tools/internal/refactor" "golang.org/x/tools/internal/typesinternal" + "golang.org/x/tools/internal/versions" ) // Warning: this analyzer is not safe to enable by default (not nil-preserving). 
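
The slicescontains change above generalizes the old lhs=false/lhs=true special case so the polarity may be flipped; roughly, it now rewrites loops like this (illustrative sketch):

package main

import (
	"fmt"
	"slices"
)

func main() {
	haystack := []string{"a", "b", "c"}

	// Negated pattern the pass now handles:
	missing := true
	for _, v := range haystack {
		if v == "b" {
			missing = false
			break
		}
	}

	// Suggested fix: the rhs of the prior assignment becomes
	// [!]slices.Contains(...) and the loop is deleted.
	missing2 := !slices.Contains(haystack, "b")

	fmt.Println(missing, missing2) // false false
}
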
var SlicesDeleteAnalyzer = &analysis.Analyzer{ - Name: "slicesdelete", - Doc: analysisinternal.MustExtractDoc(doc, "slicesdelete"), - Requires: []*analysis.Analyzer{ - generated.Analyzer, - inspect.Analyzer, - }, - Run: slicesdelete, - URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/modernize#slicesdelete", + Name: "slicesdelete", + Doc: analyzerutil.MustExtractDoc(doc, "slicesdelete"), + Requires: []*analysis.Analyzer{inspect.Analyzer}, + Run: slicesdelete, + URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/modernize#slicesdelete", } // The slicesdelete pass attempts to replace instances of append(s[:i], s[i+k:]...) @@ -37,15 +33,12 @@ var SlicesDeleteAnalyzer = &analysis.Analyzer{ // Other variations that will also have suggested replacements include: // append(s[:i-1], s[i:]...) and append(s[:i+k1], s[i+k2:]...) where k2 > k1. func slicesdelete(pass *analysis.Pass) (any, error) { - skipGenerated(pass) - // Skip the analyzer in packages where its // fixes would create an import cycle. if within(pass, "slices", "runtime") { return nil, nil } - inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) info := pass.TypesInfo report := func(file *ast.File, call *ast.CallExpr, slice1, slice2 *ast.SliceExpr) { insert := func(pos token.Pos, text string) analysis.TextEdit { @@ -55,7 +48,7 @@ func slicesdelete(pass *analysis.Pass) (any, error) { return types.Identical(types.Default(info.TypeOf(e)), builtinInt.Type()) } isIntShadowed := func() bool { - scope := pass.TypesInfo.Scopes[file].Innermost(call.Lparen) + scope := info.Scopes[file].Innermost(call.Lparen) if _, obj := scope.LookupParent("int", call.Lparen); obj != builtinInt { return true // int type is shadowed } @@ -130,7 +123,7 @@ func slicesdelete(pass *analysis.Pass) (any, error) { }}, }) } - for curFile := range filesUsing(inspect, info, "go1.21") { + for curFile := range filesUsingGoVersion(pass, versions.Go1_21) { file := curFile.Node().(*ast.File) for curCall := range curFile.Preorder((*ast.CallExpr)(nil)) { call := curCall.Node().(*ast.CallExpr) diff --git a/vendor/golang.org/x/tools/go/analysis/passes/modernize/sortslice.go b/vendor/golang.org/x/tools/go/analysis/passes/modernize/sortslice.go index 66af16d1f6..e22b8c55f5 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/modernize/sortslice.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/modernize/sortslice.go @@ -11,20 +11,19 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" - "golang.org/x/tools/internal/analysisinternal" - "golang.org/x/tools/internal/analysisinternal/generated" - typeindexanalyzer "golang.org/x/tools/internal/analysisinternal/typeindex" + "golang.org/x/tools/internal/analysis/analyzerutil" + typeindexanalyzer "golang.org/x/tools/internal/analysis/typeindex" "golang.org/x/tools/internal/astutil" "golang.org/x/tools/internal/refactor" "golang.org/x/tools/internal/typesinternal/typeindex" + "golang.org/x/tools/internal/versions" ) // (Not to be confused with go/analysis/passes/sortslice.) var SlicesSortAnalyzer = &analysis.Analyzer{ Name: "slicessort", - Doc: analysisinternal.MustExtractDoc(doc, "slicessort"), + Doc: analyzerutil.MustExtractDoc(doc, "slicessort"), Requires: []*analysis.Analyzer{ - generated.Analyzer, inspect.Analyzer, typeindexanalyzer.Analyzer, }, @@ -52,8 +51,6 @@ var SlicesSortAnalyzer = &analysis.Analyzer{ // - sort.Sort(x) where x has a named slice type whose Less method is the natural order.
// -> slices.Sort(x) func slicessort(pass *analysis.Pass) (any, error) { - skipGenerated(pass) - // Skip the analyzer in packages where its // fixes would create an import cycle. if within(pass, "slices", "sort", "runtime") { @@ -87,7 +84,7 @@ func slicessort(pass *analysis.Pass) (any, error) { } file := astutil.EnclosingFile(curCall) if isIndex(compare.X, i) && isIndex(compare.Y, j) && - fileUses(info, file, "go1.21") { + analyzerutil.FileUsesGoVersion(pass, file, versions.Go1_21) { // Have: sort.Slice(s, func(i, j int) bool { return s[i] < s[j] }) prefix, importEdits := refactor.AddImport( diff --git a/vendor/golang.org/x/tools/go/analysis/passes/modernize/stditerators.go b/vendor/golang.org/x/tools/go/analysis/passes/modernize/stditerators.go index 20817520e1..cc59580671 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/modernize/stditerators.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/modernize/stditerators.go @@ -14,9 +14,8 @@ import ( "golang.org/x/tools/go/ast/edge" "golang.org/x/tools/go/ast/inspector" "golang.org/x/tools/go/types/typeutil" - "golang.org/x/tools/internal/analysisinternal" - "golang.org/x/tools/internal/analysisinternal/generated" - typeindexanalyzer "golang.org/x/tools/internal/analysisinternal/typeindex" + "golang.org/x/tools/internal/analysis/analyzerutil" + typeindexanalyzer "golang.org/x/tools/internal/analysis/typeindex" "golang.org/x/tools/internal/astutil" "golang.org/x/tools/internal/goplsexport" "golang.org/x/tools/internal/refactor" @@ -26,9 +25,8 @@ import ( var stditeratorsAnalyzer = &analysis.Analyzer{ Name: "stditerators", - Doc: analysisinternal.MustExtractDoc(doc, "stditerators"), + Doc: analyzerutil.MustExtractDoc(doc, "stditerators"), Requires: []*analysis.Analyzer{ - generated.Analyzer, typeindexanalyzer.Analyzer, }, Run: stditerators, @@ -89,8 +87,6 @@ var stditeratorsTable = [...]struct { // iterator for that reason? We don't want to go fix to // undo optimizations. Do we need a suppression mechanism? func stditerators(pass *analysis.Pass) (any, error) { - skipGenerated(pass) - var ( index = pass.ResultOf[typeindexanalyzer.Analyzer].(*typeindex.Index) info = pass.TypesInfo @@ -116,6 +112,10 @@ func stditerators(pass *analysis.Pass) (any, error) { // // for ... { e := x.At(i); use(e) } // + // or + // + // for ... { if e := x.At(i); cond { use(e) } } + // // then chooseName prefers the name e and additionally // returns the var's symbol. We'll transform this to: // @@ -124,10 +124,11 @@ func stditerators(pass *analysis.Pass) (any, error) { // which leaves a redundant assignment that a // subsequent 'forvar' pass will eliminate. chooseName := func(curBody inspector.Cursor, x ast.Expr, i *types.Var) (string, *types.Var) { - // Is body { elem := x.At(i); ... } ? - body := curBody.Node().(*ast.BlockStmt) - if len(body.List) > 0 { - if assign, ok := body.List[0].(*ast.AssignStmt); ok && + + // isVarAssign reports whether stmt has the form v := x.At(i) + // and returns the variable if so. + isVarAssign := func(stmt ast.Stmt) *types.Var { + if assign, ok := stmt.(*ast.AssignStmt); ok && assign.Tok == token.DEFINE && len(assign.Lhs) == 1 && len(assign.Rhs) == 1 && @@ -138,15 +139,47 @@ func stditerators(pass *analysis.Pass) (any, error) { astutil.EqualSyntax(ast.Unparen(call.Fun).(*ast.SelectorExpr).X, x) && is[*ast.Ident](call.Args[0]) && info.Uses[call.Args[0].(*ast.Ident)] == i { - // Have: { elem := x.At(i); ...
} + // Have: elem := x.At(i) id := assign.Lhs[0].(*ast.Ident) - return id.Name, info.Defs[id].(*types.Var) + return info.Defs[id].(*types.Var) + } + } + return nil + } + + body := curBody.Node().(*ast.BlockStmt) + if len(body.List) > 0 { + // Is body { elem := x.At(i); ... } ? + if v := isVarAssign(body.List[0]); v != nil { + return v.Name(), v + } + + // Or { if elem := x.At(i); cond { ... } } ? + if ifstmt, ok := body.List[0].(*ast.IfStmt); ok && ifstmt.Init != nil { + if v := isVarAssign(ifstmt.Init); v != nil { + return v.Name(), v } } } loop := curBody.Parent().Node() - return refactor.FreshName(info.Scopes[loop], loop.Pos(), row.elemname), nil + + // Choose a fresh name only if + // (a) the preferred name is already declared here, and + // (b) there are references to it from the loop body. + // TODO(adonovan): this pattern also appears in errorsastype, + // and is wanted elsewhere; factor. + name := row.elemname + if v := lookup(info, curBody, name); v != nil { + // is it free in body? + for curUse := range index.Uses(v) { + if curBody.Contains(curUse) { + name = refactor.FreshName(info.Scopes[loop], loop.Pos(), name) + break + } + } + } + return name, nil } // Process each call of x.Len(). @@ -191,7 +224,7 @@ func stditerators(pass *analysis.Pass) (any, error) { } // Have: for i := 0; i < x.Len(); i++ { ... }. // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - rng = analysisinternal.Range(loop.For, loop.Post.End()) + rng = astutil.RangeOf(loop.For, loop.Post.End()) indexVar = v curBody = curFor.ChildAt(edge.ForStmt_Body, -1) elem, elemVar = chooseName(curBody, lenSel.X, indexVar) @@ -234,7 +267,7 @@ func stditerators(pass *analysis.Pass) (any, error) { // Have: for i := range x.Len() { ... } // ~~~~~~~~~~~~~ - rng = analysisinternal.Range(loop.Range, loop.X.End()) + rng = astutil.RangeOf(loop.Range, loop.X.End()) indexVar = info.Defs[id].(*types.Var) curBody = curRange.ChildAt(edge.RangeStmt_Body, -1) elem, elemVar = chooseName(curBody, lenSel.X, indexVar) @@ -313,7 +346,7 @@ func stditerators(pass *analysis.Pass) (any, error) { // may be somewhat expensive.) 
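
To illustrate the newly recognized `if e := x.At(i); ...` body form, here is a sketch using go/types.Tuple as a plausible Len/At container (whether Tuple is in this pass's table is not shown in the hunk; its Variables iterator needs go1.24):

package main

import (
	"fmt"
	"go/types"
)

func printNamed(params *types.Tuple) {
	// Index loop with the body form the hunk above now recognizes:
	for i := 0; i < params.Len(); i++ {
		if v := params.At(i); v.Name() != "" {
			fmt.Println(v.Name(), v.Type())
		}
	}

	// Iterator form such a loop modernizes toward (go1.24):
	for v := range params.Variables() {
		if v.Name() != "" {
			fmt.Println(v.Name(), v.Type())
		}
	}
}

func main() {
	printNamed(types.NewTuple(types.NewVar(0, nil, "x", types.Typ[types.Int])))
}
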
if v, ok := methodGoVersion(row.pkgpath, row.typename, row.itermethod); !ok { panic("no version found") - } else if file := astutil.EnclosingFile(curLenCall); !fileUses(info, file, v.String()) { + } else if !analyzerutil.FileUsesGoVersion(pass, astutil.EnclosingFile(curLenCall), v.String()) { continue nextCall } diff --git a/vendor/golang.org/x/tools/go/analysis/passes/modernize/stringsbuilder.go b/vendor/golang.org/x/tools/go/analysis/passes/modernize/stringsbuilder.go index 56d0ba73cc..56c5d0e3b3 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/modernize/stringsbuilder.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/modernize/stringsbuilder.go @@ -15,9 +15,8 @@ import ( "golang.org/x/tools/go/analysis/passes/inspect" "golang.org/x/tools/go/ast/edge" "golang.org/x/tools/go/ast/inspector" - "golang.org/x/tools/internal/analysisinternal" - "golang.org/x/tools/internal/analysisinternal/generated" - typeindexanalyzer "golang.org/x/tools/internal/analysisinternal/typeindex" + "golang.org/x/tools/internal/analysis/analyzerutil" + typeindexanalyzer "golang.org/x/tools/internal/analysis/typeindex" "golang.org/x/tools/internal/astutil" "golang.org/x/tools/internal/refactor" "golang.org/x/tools/internal/typesinternal" @@ -26,9 +25,8 @@ import ( var StringsBuilderAnalyzer = &analysis.Analyzer{ Name: "stringsbuilder", - Doc: analysisinternal.MustExtractDoc(doc, "stringsbuilder"), + Doc: analyzerutil.MustExtractDoc(doc, "stringsbuilder"), Requires: []*analysis.Analyzer{ - generated.Analyzer, inspect.Analyzer, typeindexanalyzer.Analyzer, }, @@ -38,8 +36,6 @@ var StringsBuilderAnalyzer = &analysis.Analyzer{ // stringsbuilder replaces string += string in a loop by strings.Builder. func stringsbuilder(pass *analysis.Pass) (any, error) { - skipGenerated(pass) - // Skip the analyzer in packages where its // fixes would create an import cycle. if within(pass, "strings", "runtime") { diff --git a/vendor/golang.org/x/tools/go/analysis/passes/modernize/stringscut.go b/vendor/golang.org/x/tools/go/analysis/passes/modernize/stringscut.go new file mode 100644 index 0000000000..521c264c51 --- /dev/null +++ b/vendor/golang.org/x/tools/go/analysis/passes/modernize/stringscut.go @@ -0,0 +1,580 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package modernize + +import ( + "fmt" + "go/ast" + "go/constant" + "go/token" + "go/types" + "iter" + "strconv" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/ast/edge" + "golang.org/x/tools/go/ast/inspector" + "golang.org/x/tools/go/types/typeutil" + "golang.org/x/tools/internal/analysis/analyzerutil" + typeindexanalyzer "golang.org/x/tools/internal/analysis/typeindex" + "golang.org/x/tools/internal/astutil" + "golang.org/x/tools/internal/goplsexport" + "golang.org/x/tools/internal/refactor" + "golang.org/x/tools/internal/typesinternal" + "golang.org/x/tools/internal/typesinternal/typeindex" + "golang.org/x/tools/internal/versions" +) + +var stringscutAnalyzer = &analysis.Analyzer{ + Name: "stringscut", + Doc: analyzerutil.MustExtractDoc(doc, "stringscut"), + Requires: []*analysis.Analyzer{ + inspect.Analyzer, + typeindexanalyzer.Analyzer, + }, + Run: stringscut, + URL: "https://pkg.go.dev/golang.org/x/tools/gopls/internal/analysis/modernize#stringscut", +} + +func init() { + // Export to gopls until this is a published modernizer. 
+ goplsexport.StringsCutModernizer = stringscutAnalyzer +} + +// stringscut offers a fix to replace an occurrence of strings.Index{,Byte} with +// strings.{Cut,Contains}, and similar fixes for functions in the bytes package. +// Consider some candidate for replacement i := strings.Index(s, substr). +// The following must hold for a replacement to occur: +// +// 1. All instances of i and s must be in one of these forms. +// Binary expressions: +// (a): establishing that i < 0: e.g.: i < 0, 0 > i, i == -1, -1 == i +// (b): establishing that i > -1: e.g.: i >= 0, 0 <= i, i == 0, 0 == i +// +// Slice expressions: +// a: s[:i], s[0:i] +// b: s[i+len(substr):], s[len(substr) + i:], s[i + const:], s[k + i:] (where k = len(substr)) +// +// 2. There can be no uses of s, substr, or i where they are +// potentially modified (e.g. in assignments, or function calls with unknown side +// effects). +// +// Then, the replacement involves the following substitutions: +// +// 1. Replace "i := strings.Index(s, substr)" with "before, after, ok := strings.Cut(s, substr)" +// +// 2. Replace instances of binary expressions (a) with !ok and binary expressions (b) with ok. +// +// 3. Replace slice expressions (a) with "before" and slice expressions (b) with "after". +// +// 4. The assignments to before, after, and ok may use the blank identifier "_" if they are unused. +// +// For example: +// +// i := strings.Index(s, substr) +// if i >= 0 { +// use(s[:i], s[i+len(substr):]) +// } +// +// Would become: +// +// before, after, ok := strings.Cut(s, substr) +// if ok { +// use(before, after) +// } +// +// If the condition involving `i` establishes that i > -1, then we replace it with +// `if ok`. Variants listed above include i >= 0, 0 <= i, and i == 0. +// If the condition is negated (e.g. establishes `i < 0`), we use `if !ok` instead. +// If the slices of `s` match `s[:i]` or `s[i+len(substr):]` or their variants listed above, +// then we replace them with before and after. +// +// When the index `i` is used only to check for the presence of the substring or byte slice, +// the suggested fix uses Contains() instead of Cut(). +// +// For example: +// +// i := strings.Index(s, substr) +// if i >= 0 { +// return +// } +// +// Would become: +// +// found := strings.Contains(s, substr) +// if found { +// return +// } +func stringscut(pass *analysis.Pass) (any, error) { + var ( + index = pass.ResultOf[typeindexanalyzer.Analyzer].(*typeindex.Index) + info = pass.TypesInfo + + stringsIndex = index.Object("strings", "Index") + stringsIndexByte = index.Object("strings", "IndexByte") + bytesIndex = index.Object("bytes", "Index") + bytesIndexByte = index.Object("bytes", "IndexByte") + ) + + for _, obj := range []types.Object{ + stringsIndex, + stringsIndexByte, + bytesIndex, + bytesIndexByte, + } { + // (obj may be nil) + nextcall: + for curCall := range index.Calls(obj) { + // Check file version. + if !analyzerutil.FileUsesGoVersion(pass, astutil.EnclosingFile(curCall), versions.Go1_18) { + continue // strings.Index not available in this file + } + indexCall := curCall.Node().(*ast.CallExpr) // the call to strings.Index, etc. + obj := typeutil.Callee(info, indexCall) + if obj == nil { + continue + } + + var iIdent *ast.Ident // defining identifier of i var + switch ek, idx := curCall.ParentEdge(); ek { + case edge.ValueSpec_Values: + // Have: var i = strings.Index(...) + curName := curCall.Parent().ChildAt(edge.ValueSpec_Names, idx) + iIdent = curName.Node().(*ast.Ident) + case edge.AssignStmt_Rhs: + // Have: i := strings.Index(...)
+ // (Must be i's definition.) + curLhs := curCall.Parent().ChildAt(edge.AssignStmt_Lhs, idx) + iIdent, _ = curLhs.Node().(*ast.Ident) // may be nil + } + + if iIdent == nil { + continue + } + // Inv: iIdent is i's definition. The following would be skipped: 'var i int; i = strings.Index(...)' + // Get uses of i. + iObj := info.ObjectOf(iIdent) + if iObj == nil { + continue + } + + var ( + s = indexCall.Args[0] + substr = indexCall.Args[1] + ) + + // Check that there are no statements that alter the value of s + // or substr after the call to Index(). + if !indexArgValid(info, index, s, indexCall.Pos()) || + !indexArgValid(info, index, substr, indexCall.Pos()) { + continue nextcall + } + + // Next, examine all uses of i. If the only uses are of the + // forms mentioned above (e.g. i < 0, i >= 0, s[:i] and s[i + + // len(substr):]), then we can replace the call to Index() + // with a call to Cut() and use the returned ok, before, + // and after variables accordingly. + lessZero, greaterNegOne, beforeSlice, afterSlice := checkIdxUses(pass.TypesInfo, index.Uses(iObj), s, substr) + + // Either there are no uses of before, after, or ok, or some use + // of i does not match our criteria - don't suggest a fix. + if lessZero == nil && greaterNegOne == nil && beforeSlice == nil && afterSlice == nil { + continue + } + + // If the only uses are ok and !ok, don't suggest a Cut() fix - these should be using Contains() + isContains := (len(lessZero) > 0 || len(greaterNegOne) > 0) && len(beforeSlice) == 0 && len(afterSlice) == 0 + + scope := iObj.Parent() + var ( + // TODO(adonovan): avoid FreshName when not needed; see errorsastype. + okVarName = refactor.FreshName(scope, iIdent.Pos(), "ok") + beforeVarName = refactor.FreshName(scope, iIdent.Pos(), "before") + afterVarName = refactor.FreshName(scope, iIdent.Pos(), "after") + foundVarName = refactor.FreshName(scope, iIdent.Pos(), "found") // for Contains() + ) + + // If there will be no uses of ok, before, or after, use the + // blank identifier instead. + if len(lessZero) == 0 && len(greaterNegOne) == 0 { + okVarName = "_" + } + if len(beforeSlice) == 0 { + beforeVarName = "_" + } + if len(afterSlice) == 0 { + afterVarName = "_" + } + + var edits []analysis.TextEdit + replace := func(exprs []ast.Expr, new string) { + for _, expr := range exprs { + edits = append(edits, analysis.TextEdit{ + Pos: expr.Pos(), + End: expr.End(), + NewText: []byte(new), + }) + } + } + // Get the ident for the call to strings.Index, which could just be + // "Index" if the strings package is dot imported. + indexCallId := typesinternal.UsedIdent(info, indexCall.Fun) + replacedFunc := "Cut" + if isContains { + replacedFunc = "Contains" + replace(lessZero, "!"+foundVarName) // idx < 0 -> !found + replace(greaterNegOne, foundVarName) // idx > -1 -> found + + // Replace the assignment with found, and replace the call to + // Index or IndexByte with a call to Contains. + // i := strings.Index (...) + // ----- -------- + // found := strings.Contains(...)
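
Concretely, the Contains path assembled here turns code like the following into a presence test (an illustrative sketch):

package main

import (
	"fmt"
	"strings"
)

func main() {
	s, substr := "hello world", "wor"

	// Before: i is used only to test presence.
	i := strings.Index(s, substr)
	if i >= 0 {
		fmt.Println("found it")
	}

	// After: the assignment becomes found := strings.Contains(...),
	// and i >= 0 becomes found.
	if found := strings.Contains(s, substr); found {
		fmt.Println("found it")
	}
}
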
+ edits = append(edits, analysis.TextEdit{ + Pos: iIdent.Pos(), + End: iIdent.End(), + NewText: []byte(foundVarName), + }, analysis.TextEdit{ + Pos: indexCallId.Pos(), + End: indexCallId.End(), + NewText: []byte("Contains"), + }) + } else { + replace(lessZero, "!"+okVarName) // idx < 0 -> !ok + replace(greaterNegOne, okVarName) // idx > -1 -> ok + replace(beforeSlice, beforeVarName) // s[:idx] -> before + replace(afterSlice, afterVarName) // s[idx+k:] -> after + + // Replace the assignment with before, after, ok, and replace + // the call to Index or IndexByte with a call to Cut. + // i := strings.Index(...) + // ----------------- ----- + // before, after, ok := strings.Cut (...) + edits = append(edits, analysis.TextEdit{ + Pos: iIdent.Pos(), + End: iIdent.End(), + NewText: fmt.Appendf(nil, "%s, %s, %s", beforeVarName, afterVarName, okVarName), + }, analysis.TextEdit{ + Pos: indexCallId.Pos(), + End: indexCallId.End(), + NewText: []byte("Cut"), + }) + } + + // Calls to IndexByte have a byte as their second arg, which + // must be converted to a string or []byte to be a valid arg for Cut/Contains. + if obj.Name() == "IndexByte" { + switch obj.Pkg().Name() { + case "strings": + searchByteVal := info.Types[substr].Value + if searchByteVal == nil { + // substr is a variable, e.g. substr := byte('b') + // use string(substr) + edits = append(edits, []analysis.TextEdit{ + { + Pos: substr.Pos(), + NewText: []byte("string("), + }, + { + Pos: substr.End(), + NewText: []byte(")"), + }, + }...) + } else { + // substr is a byte constant + val, _ := constant.Int64Val(searchByteVal) // inv: must be a valid byte + // strings.Cut/Contains requires a string, so convert byte literal to string literal; e.g. 'a' -> "a", 55 -> "7" + edits = append(edits, analysis.TextEdit{ + Pos: substr.Pos(), + End: substr.End(), + NewText: strconv.AppendQuote(nil, string(byte(val))), + }) + } + case "bytes": + // bytes.Cut/Contains requires a []byte, so wrap substr in a []byte{} + edits = append(edits, []analysis.TextEdit{ + { + Pos: substr.Pos(), + NewText: []byte("[]byte{"), + }, + { + Pos: substr.End(), + NewText: []byte("}"), + }, + }...) + } + } + pass.Report(analysis.Diagnostic{ + Pos: indexCall.Fun.Pos(), + End: indexCall.Fun.End(), + Message: fmt.Sprintf("%s.%s can be simplified using %s.%s", + obj.Pkg().Name(), obj.Name(), obj.Pkg().Name(), replacedFunc), + Category: "stringscut", + SuggestedFixes: []analysis.SuggestedFix{{ + Message: fmt.Sprintf("Simplify %s.%s call using %s.%s", obj.Pkg().Name(), obj.Name(), obj.Pkg().Name(), replacedFunc), + TextEdits: edits, + }}, + }) + } + } + + return nil, nil +} + +// indexArgValid reports whether expr is a valid strings.Index(_, _) arg +// for the transformation. An arg is valid iff it is: +// - constant; +// - a local variable with no modifying uses after the Index() call; or +// - []byte(x) where x is also valid by this definition. +// All other expressions are assumed not referentially transparent, +// so we cannot be sure that all uses are safe to replace. 
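
And the IndexByte argument conversion handled above, end to end (sketch; strings.Cut needs go1.18):

package main

import (
	"fmt"
	"strings"
)

func main() {
	s := "key=value"

	// Before: strings.IndexByte with slicing around i.
	i := strings.IndexByte(s, '=')
	if i >= 0 {
		fmt.Println(s[:i], s[i+1:])
	}

	// After: the byte literal '=' is quoted into the string "=",
	// and the slices become Cut's before/after results.
	if before, after, ok := strings.Cut(s, "="); ok {
		fmt.Println(before, after)
	}
}
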
+func indexArgValid(info *types.Info, index *typeindex.Index, expr ast.Expr, afterPos token.Pos) bool { + tv := info.Types[expr] + if tv.Value != nil { + return true // constant + } + switch expr := expr.(type) { + case *ast.CallExpr: + return types.Identical(tv.Type, byteSliceType) && + indexArgValid(info, index, expr.Args[0], afterPos) // check s in []byte(s) + case *ast.Ident: + sObj := info.Uses[expr] + sUses := index.Uses(sObj) + return !hasModifyingUses(info, sUses, afterPos) + default: + // For now, skip instances where s or substr are not + // identifiers, basic lits, or call expressions of the form + // []byte(s). + // TODO(mkalil): Handle s and substr being expressions like ptr.field[i]. + // From adonovan: We'd need to analyze s and substr to see + // whether they are referentially transparent, and if not, + // analyze all code between declaration and use and see if + // there are statements or expressions with potential side + // effects. + return false + } +} + +// checkIdxUses inspects the uses of i to make sure they match certain criteria that +// allow us to suggest a modernization. If all uses of i, s and substr match +// one of the following four valid formats, it returns a list of occurrences for +// each format. If any of the uses do not match one of the formats, it returns nil +// for all values, since we should not offer a replacement. +// 1. lessZero - a condition involving i establishing that i is negative (e.g. i < 0, 0 > i, i == -1, -1 == i) +// 2. greaterNegOne - a condition involving i establishing that i is non-negative (e.g. i >= 0, 0 <= i, i == 0, 0 == i) +// 3. beforeSlice - a slice of `s` that matches either s[:i] or s[0:i] +// 4. afterSlice - a slice of `s` that matches one of: s[i+len(substr):], s[len(substr) + i:], s[i + const:], s[k + i:] (where k = len(substr)) +func checkIdxUses(info *types.Info, uses iter.Seq[inspector.Cursor], s, substr ast.Expr) (lessZero, greaterNegOne, beforeSlice, afterSlice []ast.Expr) { + use := func(cur inspector.Cursor) bool { + ek, _ := cur.ParentEdge() + n := cur.Parent().Node() + switch ek { + case edge.BinaryExpr_X, edge.BinaryExpr_Y: + check := n.(*ast.BinaryExpr) + switch checkIdxComparison(info, check) { + case -1: + lessZero = append(lessZero, check) + return true + case 1: + greaterNegOne = append(greaterNegOne, check) + return true + } + // Check does not establish that i < 0 or i > -1. + // Might be part of an outer slice expression like s[i + k] + // which requires a different check. + // Check that the thing being sliced is s and that the slice + // doesn't have a max index. + if slice, ok := cur.Parent().Parent().Node().(*ast.SliceExpr); ok && + sameObject(info, s, slice.X) && + slice.Max == nil { + if isBeforeSlice(info, ek, slice) { + beforeSlice = append(beforeSlice, slice) + return true + } else if isAfterSlice(info, ek, slice, substr) { + afterSlice = append(afterSlice, slice) + return true + } + } + case edge.SliceExpr_Low, edge.SliceExpr_High: + slice := n.(*ast.SliceExpr) + // Check that the thing being sliced is s and that the slice doesn't + // have a max index.
+ if sameObject(info, s, slice.X) && slice.Max == nil { + if isBeforeSlice(info, ek, slice) { + beforeSlice = append(beforeSlice, slice) + return true + } else if isAfterSlice(info, ek, slice, substr) { + afterSlice = append(afterSlice, slice) + return true + } + } + } + return false + } + + for curIdent := range uses { + if !use(curIdent) { + return nil, nil, nil, nil + } + } + return lessZero, greaterNegOne, beforeSlice, afterSlice +} + +// hasModifyingUses reports whether any of the uses involve potential +// modifications. Uses involving assignments before the "afterPos" won't be +// considered. +func hasModifyingUses(info *types.Info, uses iter.Seq[inspector.Cursor], afterPos token.Pos) bool { + for curUse := range uses { + ek, _ := curUse.ParentEdge() + if ek == edge.AssignStmt_Lhs { + if curUse.Node().Pos() <= afterPos { + continue + } + assign := curUse.Parent().Node().(*ast.AssignStmt) + if sameObject(info, assign.Lhs[0], curUse.Node().(*ast.Ident)) { + // Modifying use because we are reassigning the value of the object. + return true + } + } else if ek == edge.UnaryExpr_X && + curUse.Parent().Node().(*ast.UnaryExpr).Op == token.AND { + // Modifying use because we might be passing the object by reference (an explicit &). + // We can ignore the case where we have a method call on the expression (which + // has an implicit &) because we know the type of s and substr are strings + // which cannot have methods on them. + return true + } + } + return false +} + +// checkIdxComparison reports whether the check establishes that i is negative +// or non-negative. It returns -1 in the first case, 1 in the second, and 0 if +// we can confirm neither condition. We assume that a check passed to +// checkIdxComparison has i as one of its operands. +func checkIdxComparison(info *types.Info, check *ast.BinaryExpr) int { + // Check establishes that i is negative. + // e.g.: i < 0, 0 > i, i == -1, -1 == i + if check.Op == token.LSS && (isNegativeConst(info, check.Y) || isZeroIntConst(info, check.Y)) || // i < (0 or neg) + check.Op == token.GTR && (isNegativeConst(info, check.X) || isZeroIntConst(info, check.X)) || // (0 or neg) > i + check.Op == token.LEQ && (isNegativeConst(info, check.Y)) || // i <= (neg) + check.Op == token.GEQ && (isNegativeConst(info, check.X)) || // (neg) >= i + check.Op == token.EQL && + (isNegativeConst(info, check.X) || isNegativeConst(info, check.Y)) { // i == neg; neg == i + return -1 + } + // Check establishes that i is non-negative. + // e.g.: i >= 0, 0 <= i, i == 0, 0 == i + if check.Op == token.GTR && (isNonNegativeConst(info, check.Y) || isIntLiteral(info, check.Y, -1)) || // i > (non-neg or -1) + check.Op == token.LSS && (isNonNegativeConst(info, check.X) || isIntLiteral(info, check.X, -1)) || // (non-neg or -1) < i + check.Op == token.GEQ && isNonNegativeConst(info, check.Y) || // i >= (non-neg) + check.Op == token.LEQ && isNonNegativeConst(info, check.X) || // (non-neg) <= i + check.Op == token.EQL && + (isNonNegativeConst(info, check.X) || isNonNegativeConst(info, check.Y)) { // i == non-neg; non-neg == i + return 1 + } + return 0 +} + +// isNegativeConst returns true if the expr is a const int with value < zero. +func isNegativeConst(info *types.Info, expr ast.Expr) bool { + if tv, ok := info.Types[expr]; ok && tv.Value != nil && tv.Value.Kind() == constant.Int { + if v, ok := constant.Int64Val(tv.Value); ok { + return v < 0 + } + } + return false +} + +// isNonNegativeConst returns true if the expr is a const int with value >= zero.
+func isNonNegativeConst(info *types.Info, expr ast.Expr) bool { + if tv, ok := info.Types[expr]; ok && tv.Value != nil && tv.Value.Kind() == constant.Int { + if v, ok := constant.Int64Val(tv.Value); ok { + return v >= 0 + } + } + return false +} + +// isBeforeSlice reports whether the SliceExpr is of the form s[:i] or s[0:i]. +func isBeforeSlice(info *types.Info, ek edge.Kind, slice *ast.SliceExpr) bool { + return ek == edge.SliceExpr_High && (slice.Low == nil || isZeroIntConst(info, slice.Low)) +} + +// isAfterSlice reports whether the SliceExpr is of the form s[i+len(substr):], +// or s[i + k:] where k is a const equal to len(substr). +func isAfterSlice(info *types.Info, ek edge.Kind, slice *ast.SliceExpr, substr ast.Expr) bool { + lowExpr, ok := slice.Low.(*ast.BinaryExpr) + if !ok || slice.High != nil { + return false + } + // Returns true if the expression is a call to len(substr). + isLenCall := func(expr ast.Expr) bool { + call, ok := expr.(*ast.CallExpr) + if !ok || len(call.Args) != 1 { + return false + } + return sameObject(info, substr, call.Args[0]) && typeutil.Callee(info, call) == builtinLen + } + + // Handle len([]byte(substr)) + if is[*ast.CallExpr](substr) { + call := substr.(*ast.CallExpr) + tv := info.Types[call.Fun] + if tv.IsType() && types.Identical(tv.Type, byteSliceType) { + // Only one arg in []byte conversion. + substr = call.Args[0] + } + } + substrLen := -1 + substrVal := info.Types[substr].Value + if substrVal != nil { + switch substrVal.Kind() { + case constant.String: + substrLen = len(constant.StringVal(substrVal)) + case constant.Int: + // constant.Value is a byte literal, e.g. bytes.IndexByte(_, 'a') + // or a numeric byte literal, e.g. bytes.IndexByte(_, 65) + substrLen = 1 + } + } + + switch ek { + case edge.BinaryExpr_X: + kVal := info.Types[lowExpr.Y].Value + if kVal == nil { + // i + len(substr) + return lowExpr.Op == token.ADD && isLenCall(lowExpr.Y) + } else { + // i + k + kInt, ok := constant.Int64Val(kVal) + return ok && substrLen == int(kInt) + } + case edge.BinaryExpr_Y: + kVal := info.Types[lowExpr.X].Value + if kVal == nil { + // len(substr) + i + return lowExpr.Op == token.ADD && isLenCall(lowExpr.X) + } else { + // k + i + kInt, ok := constant.Int64Val(kVal) + return ok && substrLen == int(kInt) + } + } + return false +} + +// sameObject reports whether we know that the expressions resolve to the same object.
+func sameObject(info *types.Info, expr1, expr2 ast.Expr) bool { + if ident1, ok := expr1.(*ast.Ident); ok { + if ident2, ok := expr2.(*ast.Ident); ok { + uses1, ok1 := info.Uses[ident1] + uses2, ok2 := info.Uses[ident2] + return ok1 && ok2 && uses1 == uses2 + } + } + return false +} diff --git a/vendor/golang.org/x/tools/go/analysis/passes/modernize/stringscutprefix.go b/vendor/golang.org/x/tools/go/analysis/passes/modernize/stringscutprefix.go index 9e76f953ed..7dc11308dd 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/modernize/stringscutprefix.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/modernize/stringscutprefix.go @@ -12,22 +12,20 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" - "golang.org/x/tools/go/ast/inspector" "golang.org/x/tools/go/types/typeutil" - "golang.org/x/tools/internal/analysisinternal" - "golang.org/x/tools/internal/analysisinternal/generated" - typeindexanalyzer "golang.org/x/tools/internal/analysisinternal/typeindex" + "golang.org/x/tools/internal/analysis/analyzerutil" + typeindexanalyzer "golang.org/x/tools/internal/analysis/typeindex" "golang.org/x/tools/internal/astutil" "golang.org/x/tools/internal/refactor" "golang.org/x/tools/internal/typesinternal" "golang.org/x/tools/internal/typesinternal/typeindex" + "golang.org/x/tools/internal/versions" ) var StringsCutPrefixAnalyzer = &analysis.Analyzer{ Name: "stringscutprefix", - Doc: analysisinternal.MustExtractDoc(doc, "stringscutprefix"), + Doc: analyzerutil.MustExtractDoc(doc, "stringscutprefix"), Requires: []*analysis.Analyzer{ - generated.Analyzer, inspect.Analyzer, typeindexanalyzer.Analyzer, }, @@ -56,12 +54,9 @@ var StringsCutPrefixAnalyzer = &analysis.Analyzer{ // Variants: // - bytes.HasPrefix/HasSuffix usage as pattern 1. func stringscutprefix(pass *analysis.Pass) (any, error) { - skipGenerated(pass) - var ( - inspect = pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) - index = pass.ResultOf[typeindexanalyzer.Analyzer].(*typeindex.Index) - info = pass.TypesInfo + index = pass.ResultOf[typeindexanalyzer.Analyzer].(*typeindex.Index) + info = pass.TypesInfo stringsTrimPrefix = index.Object("strings", "TrimPrefix") bytesTrimPrefix = index.Object("bytes", "TrimPrefix") @@ -72,7 +67,7 @@ func stringscutprefix(pass *analysis.Pass) (any, error) { return nil, nil } - for curFile := range filesUsing(inspect, pass.TypesInfo, "go1.20") { + for curFile := range filesUsingGoVersion(pass, versions.Go1_20) { for curIfStmt := range curFile.Preorder((*ast.IfStmt)(nil)) { ifStmt := curIfStmt.Node().(*ast.IfStmt) @@ -206,6 +201,7 @@ func stringscutprefix(pass *analysis.Pass) (any, error) { if astutil.EqualSyntax(lhs, bin.X) && astutil.EqualSyntax(call.Args[0], bin.Y) || (astutil.EqualSyntax(lhs, bin.Y) && astutil.EqualSyntax(call.Args[0], bin.X)) { + // TODO(adonovan): avoid FreshName when not needed; see errorsastype. 
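
For reference, the TrimPrefix idiom that stringscutprefix rewrites (sketch; strings.CutPrefix was added in go1.20, matching the version gate above):

package main

import (
	"fmt"
	"strings"
)

func main() {
	s := "v1.2.3"

	// Before: the rest != s idiom.
	if rest := strings.TrimPrefix(s, "v"); rest != s {
		fmt.Println("version:", rest)
	}

	// After: CutPrefix reports the match directly.
	if rest, ok := strings.CutPrefix(s, "v"); ok {
		fmt.Println("version:", rest)
	}
}
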
okVarName := refactor.FreshName(info.Scopes[ifStmt], ifStmt.Pos(), "ok") // Have one of: // if rest := TrimPrefix(s, prefix); rest != s { (ditto Suffix) diff --git a/vendor/golang.org/x/tools/go/analysis/passes/modernize/stringsseq.go b/vendor/golang.org/x/tools/go/analysis/passes/modernize/stringsseq.go index ef2b546364..d02a53230f 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/modernize/stringsseq.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/modernize/stringsseq.go @@ -13,19 +13,17 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" "golang.org/x/tools/go/ast/edge" - "golang.org/x/tools/go/ast/inspector" "golang.org/x/tools/go/types/typeutil" - "golang.org/x/tools/internal/analysisinternal" - "golang.org/x/tools/internal/analysisinternal/generated" - typeindexanalyzer "golang.org/x/tools/internal/analysisinternal/typeindex" + "golang.org/x/tools/internal/analysis/analyzerutil" + typeindexanalyzer "golang.org/x/tools/internal/analysis/typeindex" "golang.org/x/tools/internal/typesinternal/typeindex" + "golang.org/x/tools/internal/versions" ) var StringsSeqAnalyzer = &analysis.Analyzer{ Name: "stringsseq", - Doc: analysisinternal.MustExtractDoc(doc, "stringsseq"), + Doc: analyzerutil.MustExtractDoc(doc, "stringsseq"), Requires: []*analysis.Analyzer{ - generated.Analyzer, inspect.Analyzer, typeindexanalyzer.Analyzer, }, @@ -48,12 +46,9 @@ var StringsSeqAnalyzer = &analysis.Analyzer{ // - bytes.SplitSeq // - bytes.FieldsSeq func stringsseq(pass *analysis.Pass) (any, error) { - skipGenerated(pass) - var ( - inspect = pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) - index = pass.ResultOf[typeindexanalyzer.Analyzer].(*typeindex.Index) - info = pass.TypesInfo + index = pass.ResultOf[typeindexanalyzer.Analyzer].(*typeindex.Index) + info = pass.TypesInfo stringsSplit = index.Object("strings", "Split") stringsFields = index.Object("strings", "Fields") @@ -64,7 +59,7 @@ func stringsseq(pass *analysis.Pass) (any, error) { return nil, nil } - for curFile := range filesUsing(inspect, info, "go1.24") { + for curFile := range filesUsingGoVersion(pass, versions.Go1_24) { for curRange := range curFile.Preorder((*ast.RangeStmt)(nil)) { rng := curRange.Node().(*ast.RangeStmt) diff --git a/vendor/golang.org/x/tools/go/analysis/passes/modernize/testingcontext.go b/vendor/golang.org/x/tools/go/analysis/passes/modernize/testingcontext.go index 558cf142dd..939330521c 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/modernize/testingcontext.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/modernize/testingcontext.go @@ -17,19 +17,18 @@ import ( "golang.org/x/tools/go/analysis/passes/inspect" "golang.org/x/tools/go/ast/edge" "golang.org/x/tools/go/types/typeutil" - "golang.org/x/tools/internal/analysisinternal" - "golang.org/x/tools/internal/analysisinternal/generated" - typeindexanalyzer "golang.org/x/tools/internal/analysisinternal/typeindex" + "golang.org/x/tools/internal/analysis/analyzerutil" + typeindexanalyzer "golang.org/x/tools/internal/analysis/typeindex" "golang.org/x/tools/internal/astutil" "golang.org/x/tools/internal/typesinternal" "golang.org/x/tools/internal/typesinternal/typeindex" + "golang.org/x/tools/internal/versions" ) var TestingContextAnalyzer = &analysis.Analyzer{ Name: "testingcontext", - Doc: analysisinternal.MustExtractDoc(doc, "testingcontext"), + Doc: analyzerutil.MustExtractDoc(doc, "testingcontext"), Requires: []*analysis.Analyzer{ - generated.Analyzer, inspect.Analyzer, typeindexanalyzer.Analyzer, }, 
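
The stringsseq hunk above gates its fix on go1.24; the rewrite it suggests looks like this (illustrative sketch):

package main

import (
	"fmt"
	"strings"
)

func main() {
	csv := "a,b,c"

	// Before: Split allocates the whole []string up front.
	for _, part := range strings.Split(csv, ",") {
		fmt.Println(part)
	}

	// After: SplitSeq yields each part without building a slice (go1.24).
	for part := range strings.SplitSeq(csv, ",") {
		fmt.Println(part)
	}
}
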
@@ -56,8 +55,6 @@ var TestingContextAnalyzer = &analysis.Analyzer{ // - the call is within a test or subtest function // - the relevant testing.{T,B,F} is named and not shadowed at the call func testingContext(pass *analysis.Pass) (any, error) { - skipGenerated(pass) - var ( index = pass.ResultOf[typeindexanalyzer.Analyzer].(*typeindex.Index) info = pass.TypesInfo @@ -137,7 +134,7 @@ calls: testObj = isTestFn(info, n) } } - if testObj != nil && fileUses(info, astutil.EnclosingFile(cur), "go1.24") { + if testObj != nil && analyzerutil.FileUsesGoVersion(pass, astutil.EnclosingFile(cur), versions.Go1_24) { // Have a test function. Check that we can resolve the relevant // testing.{T,B,F} at the current position. if _, obj := lhs[0].Parent().LookupParent(testObj.Name(), lhs[0].Pos()); obj == testObj { diff --git a/vendor/golang.org/x/tools/go/analysis/passes/modernize/waitgroup.go b/vendor/golang.org/x/tools/go/analysis/passes/modernize/waitgroup.go index b890f334ba..19564c69b6 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/modernize/waitgroup.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/modernize/waitgroup.go @@ -14,19 +14,18 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" "golang.org/x/tools/go/types/typeutil" - "golang.org/x/tools/internal/analysisinternal" - "golang.org/x/tools/internal/analysisinternal/generated" - typeindexanalyzer "golang.org/x/tools/internal/analysisinternal/typeindex" + "golang.org/x/tools/internal/analysis/analyzerutil" + typeindexanalyzer "golang.org/x/tools/internal/analysis/typeindex" "golang.org/x/tools/internal/astutil" "golang.org/x/tools/internal/refactor" "golang.org/x/tools/internal/typesinternal/typeindex" + "golang.org/x/tools/internal/versions" ) var WaitGroupAnalyzer = &analysis.Analyzer{ Name: "waitgroup", - Doc: analysisinternal.MustExtractDoc(doc, "waitgroup"), + Doc: analyzerutil.MustExtractDoc(doc, "waitgroup"), Requires: []*analysis.Analyzer{ - generated.Analyzer, inspect.Analyzer, typeindexanalyzer.Analyzer, }, @@ -61,8 +60,6 @@ var WaitGroupAnalyzer = &analysis.Analyzer{ // other effects, or blocked, or if WaitGroup.Go propagated panics // from child to parent goroutine, the argument would be different.) 
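
The testingcontext pass likewise requires go1.24, when (*testing.T).Context was added; roughly (sketch, written as a _test.go file):

package example

import (
	"context"
	"testing"
)

func TestBefore(t *testing.T) {
	// Pattern the pass matches: a manually cancelled Context.
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	_ = ctx
}

func TestAfter(t *testing.T) {
	// The testing framework cancels this Context when the test ends.
	ctx := t.Context()
	_ = ctx
}
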
func waitgroup(pass *analysis.Pass) (any, error) { - skipGenerated(pass) - var ( index = pass.ResultOf[typeindexanalyzer.Analyzer].(*typeindex.Index) info = pass.TypesInfo @@ -128,7 +125,7 @@ func waitgroup(pass *analysis.Pass) (any, error) { } file := astutil.EnclosingFile(curAddCall) - if !fileUses(info, file, "go1.25") { + if !analyzerutil.FileUsesGoVersion(pass, file, versions.Go1_25) { continue } tokFile := pass.Fset.File(file.Pos()) diff --git a/vendor/golang.org/x/tools/go/analysis/passes/nilfunc/nilfunc.go b/vendor/golang.org/x/tools/go/analysis/passes/nilfunc/nilfunc.go index fa1883b0c3..6b37295187 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/nilfunc/nilfunc.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/nilfunc/nilfunc.go @@ -14,8 +14,8 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" - "golang.org/x/tools/go/analysis/passes/internal/analysisutil" "golang.org/x/tools/go/ast/inspector" + "golang.org/x/tools/internal/analysis/analyzerutil" "golang.org/x/tools/internal/typesinternal" ) @@ -24,7 +24,7 @@ var doc string var Analyzer = &analysis.Analyzer{ Name: "nilfunc", - Doc: analysisutil.MustExtractDoc(doc, "nilfunc"), + Doc: analyzerutil.MustExtractDoc(doc, "nilfunc"), URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/nilfunc", Requires: []*analysis.Analyzer{inspect.Analyzer}, Run: run, diff --git a/vendor/golang.org/x/tools/go/analysis/passes/nilness/nilness.go b/vendor/golang.org/x/tools/go/analysis/passes/nilness/nilness.go index 8f1cca8f12..6f353968f2 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/nilness/nilness.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/nilness/nilness.go @@ -12,8 +12,8 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/buildssa" - "golang.org/x/tools/go/analysis/passes/internal/analysisutil" "golang.org/x/tools/go/ssa" + "golang.org/x/tools/internal/analysis/analyzerutil" "golang.org/x/tools/internal/typeparams" ) @@ -22,7 +22,7 @@ var doc string var Analyzer = &analysis.Analyzer{ Name: "nilness", - Doc: analysisutil.MustExtractDoc(doc, "nilness"), + Doc: analyzerutil.MustExtractDoc(doc, "nilness"), URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/nilness", Run: run, Requires: []*analysis.Analyzer{buildssa.Analyzer}, diff --git a/vendor/golang.org/x/tools/go/analysis/passes/printf/printf.go b/vendor/golang.org/x/tools/go/analysis/passes/printf/printf.go index d94e592cf1..337bc6af9c 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/printf/printf.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/printf/printf.go @@ -18,11 +18,10 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" - "golang.org/x/tools/go/analysis/passes/internal/analysisutil" "golang.org/x/tools/go/ast/edge" "golang.org/x/tools/go/ast/inspector" "golang.org/x/tools/go/types/typeutil" - "golang.org/x/tools/internal/analysisinternal" + "golang.org/x/tools/internal/analysis/analyzerutil" "golang.org/x/tools/internal/astutil" "golang.org/x/tools/internal/fmtstr" "golang.org/x/tools/internal/typeparams" @@ -39,7 +38,7 @@ var doc string var Analyzer = &analysis.Analyzer{ Name: "printf", - Doc: analysisutil.MustExtractDoc(doc, "printf"), + Doc: analyzerutil.MustExtractDoc(doc, "printf"), URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/printf", Requires: []*analysis.Analyzer{inspect.Analyzer}, Run: run, @@ -695,9 +694,9 @@ func checkPrintf(pass *analysis.Pass, fileVersion string, kind Kind, 
call *ast.C // such as the position of the %v substring of "...%v...". func opRange(formatArg ast.Expr, op *fmtstr.Operation) analysis.Range { if lit, ok := formatArg.(*ast.BasicLit); ok { - start, end, err := astutil.RangeInStringLiteral(lit, op.Range.Start, op.Range.End) + rng, err := astutil.RangeInStringLiteral(lit, op.Range.Start, op.Range.End) if err == nil { - return analysisinternal.Range(start, end) // position of "%v" + return rng // position of "%v" } } return formatArg // entire format string diff --git a/vendor/golang.org/x/tools/go/analysis/passes/printf/types.go b/vendor/golang.org/x/tools/go/analysis/passes/printf/types.go index f7e50f98a9..8aa3962d09 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/printf/types.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/printf/types.go @@ -204,8 +204,7 @@ func (m *argMatcher) match(typ types.Type, topLevel bool) bool { case *types.Struct: // report whether all the elements of the struct match the expected type. For // instance, with "%d" all the elements must be printable with the "%d" format. - for i := 0; i < typ.NumFields(); i++ { - typf := typ.Field(i) + for typf := range typ.Fields() { if !m.match(typf.Type(), false) { return false } diff --git a/vendor/golang.org/x/tools/go/analysis/passes/reflectvaluecompare/reflectvaluecompare.go b/vendor/golang.org/x/tools/go/analysis/passes/reflectvaluecompare/reflectvaluecompare.go index 5626ac1c12..5ce3574984 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/reflectvaluecompare/reflectvaluecompare.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/reflectvaluecompare/reflectvaluecompare.go @@ -11,9 +11,9 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" - "golang.org/x/tools/go/analysis/passes/internal/analysisutil" "golang.org/x/tools/go/ast/inspector" "golang.org/x/tools/go/types/typeutil" + "golang.org/x/tools/internal/analysis/analyzerutil" "golang.org/x/tools/internal/typesinternal" ) @@ -22,7 +22,7 @@ var doc string var Analyzer = &analysis.Analyzer{ Name: "reflectvaluecompare", - Doc: analysisutil.MustExtractDoc(doc, "reflectvaluecompare"), + Doc: analyzerutil.MustExtractDoc(doc, "reflectvaluecompare"), URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/reflectvaluecompare", Requires: []*analysis.Analyzer{inspect.Analyzer}, Run: run, diff --git a/vendor/golang.org/x/tools/go/analysis/passes/shadow/shadow.go b/vendor/golang.org/x/tools/go/analysis/passes/shadow/shadow.go index 8f768bb76c..8e60e38942 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/shadow/shadow.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/shadow/shadow.go @@ -12,8 +12,8 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" - "golang.org/x/tools/go/analysis/passes/internal/analysisutil" "golang.org/x/tools/go/ast/inspector" + "golang.org/x/tools/internal/analysis/analyzerutil" ) // NOTE: Experimental. Not part of the vet suite. 
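
The printf/types.go hunk above swaps an index loop for the struct-field iterator added to go/types in Go 1.24; for example (sketch):

package main

import (
	"fmt"
	"go/types"
)

func main() {
	s := types.NewStruct([]*types.Var{
		types.NewField(0, nil, "X", types.Typ[types.Int], false),
		types.NewField(0, nil, "Y", types.Typ[types.String], false),
	}, nil)

	// Equivalent to: for i := 0; i < s.NumFields(); i++ { f := s.Field(i); ... }
	for f := range s.Fields() {
		fmt.Println(f.Name(), f.Type())
	}
}
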
@@ -23,7 +23,7 @@ var doc string var Analyzer = &analysis.Analyzer{ Name: "shadow", - Doc: analysisutil.MustExtractDoc(doc, "shadow"), + Doc: analyzerutil.MustExtractDoc(doc, "shadow"), URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/shadow", Requires: []*analysis.Analyzer{inspect.Analyzer}, Run: run, diff --git a/vendor/golang.org/x/tools/go/analysis/passes/sigchanyzer/sigchanyzer.go b/vendor/golang.org/x/tools/go/analysis/passes/sigchanyzer/sigchanyzer.go index c339fa064d..174c27109e 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/sigchanyzer/sigchanyzer.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/sigchanyzer/sigchanyzer.go @@ -18,8 +18,8 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" - "golang.org/x/tools/go/analysis/passes/internal/analysisutil" "golang.org/x/tools/go/ast/inspector" + "golang.org/x/tools/internal/analysis/analyzerutil" "golang.org/x/tools/internal/typesinternal" ) @@ -29,7 +29,7 @@ var doc string // Analyzer describes sigchanyzer analysis function detector. var Analyzer = &analysis.Analyzer{ Name: "sigchanyzer", - Doc: analysisutil.MustExtractDoc(doc, "sigchanyzer"), + Doc: analyzerutil.MustExtractDoc(doc, "sigchanyzer"), URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/sigchanyzer", Requires: []*analysis.Analyzer{inspect.Analyzer}, Run: run, diff --git a/vendor/golang.org/x/tools/go/analysis/passes/slog/slog.go b/vendor/golang.org/x/tools/go/analysis/passes/slog/slog.go index cc58396a02..4afbe04684 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/slog/slog.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/slog/slog.go @@ -17,9 +17,9 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" - "golang.org/x/tools/go/analysis/passes/internal/analysisutil" "golang.org/x/tools/go/ast/inspector" "golang.org/x/tools/go/types/typeutil" + "golang.org/x/tools/internal/analysis/analyzerutil" "golang.org/x/tools/internal/astutil" "golang.org/x/tools/internal/typesinternal" ) @@ -29,7 +29,7 @@ var doc string var Analyzer = &analysis.Analyzer{ Name: "slog", - Doc: analysisutil.MustExtractDoc(doc, "slog"), + Doc: analyzerutil.MustExtractDoc(doc, "slog"), URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/slog", Requires: []*analysis.Analyzer{inspect.Analyzer}, Run: run, @@ -168,7 +168,7 @@ func isAttr(t types.Type) bool { // "slog.Logger.With" (instead of "(*log/slog.Logger).With") func shortName(fn *types.Func) string { var r string - if recv := fn.Type().(*types.Signature).Recv(); recv != nil { + if recv := fn.Signature().Recv(); recv != nil { if _, named := typesinternal.ReceiverNamed(recv); named != nil { r = named.Obj().Name() } else { @@ -188,7 +188,7 @@ func kvFuncSkipArgs(fn *types.Func) (int, bool) { return 0, false } var recvName string // by default a slog package function - if recv := fn.Type().(*types.Signature).Recv(); recv != nil { + if recv := fn.Signature().Recv(); recv != nil { _, named := typesinternal.ReceiverNamed(recv) if named == nil { return 0, false // anon struct/interface diff --git a/vendor/golang.org/x/tools/go/analysis/passes/stdmethods/stdmethods.go b/vendor/golang.org/x/tools/go/analysis/passes/stdmethods/stdmethods.go index a0bdf001ab..b68385b242 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/stdmethods/stdmethods.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/stdmethods/stdmethods.go @@ -12,8 +12,8 @@ import ( "golang.org/x/tools/go/analysis" 
"golang.org/x/tools/go/analysis/passes/inspect" - "golang.org/x/tools/go/analysis/passes/internal/analysisutil" "golang.org/x/tools/go/ast/inspector" + "golang.org/x/tools/internal/analysis/analyzerutil" ) //go:embed doc.go @@ -21,7 +21,7 @@ var doc string var Analyzer = &analysis.Analyzer{ Name: "stdmethods", - Doc: analysisutil.MustExtractDoc(doc, "stdmethods"), + Doc: analyzerutil.MustExtractDoc(doc, "stdmethods"), URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/stdmethods", Requires: []*analysis.Analyzer{inspect.Analyzer}, Run: run, @@ -131,12 +131,12 @@ func canonicalMethod(pass *analysis.Pass, id *ast.Ident) { } // Do the =s (if any) all match? - if !matchParams(pass, expect.args, args, "=") || !matchParams(pass, expect.results, results, "=") { + if !matchParams(expect.args, args, "=") || !matchParams(expect.results, results, "=") { return } // Everything must match. - if !matchParams(pass, expect.args, args, "") || !matchParams(pass, expect.results, results, "") { + if !matchParams(expect.args, args, "") || !matchParams(expect.results, results, "") { expectFmt := id.Name + "(" + argjoin(expect.args) + ")" if len(expect.results) == 1 { expectFmt += " " + argjoin(expect.results) @@ -168,7 +168,7 @@ func argjoin(x []string) string { } // Does each type in expect with the given prefix match the corresponding type in actual? -func matchParams(pass *analysis.Pass, expect []string, actual *types.Tuple, prefix string) bool { +func matchParams(expect []string, actual *types.Tuple, prefix string) bool { for i, x := range expect { if !strings.HasPrefix(x, prefix) { continue diff --git a/vendor/golang.org/x/tools/go/analysis/passes/stringintconv/string.go b/vendor/golang.org/x/tools/go/analysis/passes/stringintconv/string.go index 7a02d85ce7..0cbae68898 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/stringintconv/string.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/stringintconv/string.go @@ -13,8 +13,8 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" - "golang.org/x/tools/go/analysis/passes/internal/analysisutil" "golang.org/x/tools/go/ast/inspector" + "golang.org/x/tools/internal/analysis/analyzerutil" "golang.org/x/tools/internal/refactor" "golang.org/x/tools/internal/typeparams" "golang.org/x/tools/internal/typesinternal" @@ -25,7 +25,7 @@ var doc string var Analyzer = &analysis.Analyzer{ Name: "stringintconv", - Doc: analysisutil.MustExtractDoc(doc, "stringintconv"), + Doc: analyzerutil.MustExtractDoc(doc, "stringintconv"), URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/stringintconv", Requires: []*analysis.Analyzer{inspect.Analyzer}, Run: run, diff --git a/vendor/golang.org/x/tools/go/analysis/passes/testinggoroutine/testinggoroutine.go b/vendor/golang.org/x/tools/go/analysis/passes/testinggoroutine/testinggoroutine.go index 400a6960c6..e38c266afe 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/testinggoroutine/testinggoroutine.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/testinggoroutine/testinggoroutine.go @@ -13,9 +13,9 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" - "golang.org/x/tools/go/analysis/passes/internal/analysisutil" "golang.org/x/tools/go/ast/inspector" "golang.org/x/tools/go/types/typeutil" + "golang.org/x/tools/internal/analysis/analyzerutil" "golang.org/x/tools/internal/typesinternal" ) @@ -30,7 +30,7 @@ func init() { var Analyzer = &analysis.Analyzer{ Name: "testinggoroutine", - Doc: 
analysisutil.MustExtractDoc(doc, "testinggoroutine"), + Doc: analyzerutil.MustExtractDoc(doc, "testinggoroutine"), URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/testinggoroutine", Requires: []*analysis.Analyzer{inspect.Analyzer}, Run: run, diff --git a/vendor/golang.org/x/tools/go/analysis/passes/testinggoroutine/util.go b/vendor/golang.org/x/tools/go/analysis/passes/testinggoroutine/util.go index db2e5f76d1..4b68a789cf 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/testinggoroutine/util.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/testinggoroutine/util.go @@ -36,7 +36,7 @@ func localFunctionDecls(info *types.Info, files []*ast.File) func(*types.Func) * // isMethodNamed returns true if f is a method defined // in package with the path pkgPath with a name in names. // -// (Unlike [analysisinternal.IsMethodNamed], it ignores the receiver type name.) +// (Unlike [typesinternal.IsMethodNamed], it ignores the receiver type name.) func isMethodNamed(f *types.Func, pkgPath string, names ...string) bool { if f == nil { return false @@ -44,7 +44,7 @@ func isMethodNamed(f *types.Func, pkgPath string, names ...string) bool { if f.Pkg() == nil || f.Pkg().Path() != pkgPath { return false } - if f.Type().(*types.Signature).Recv() == nil { + if f.Signature().Recv() == nil { return false } return slices.Contains(names, f.Name()) diff --git a/vendor/golang.org/x/tools/go/analysis/passes/tests/tests.go b/vendor/golang.org/x/tools/go/analysis/passes/tests/tests.go index 1c0e92d01d..1f33df8403 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/tests/tests.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/tests/tests.go @@ -15,8 +15,8 @@ import ( "unicode/utf8" "golang.org/x/tools/go/analysis" - "golang.org/x/tools/go/analysis/passes/internal/analysisutil" - "golang.org/x/tools/internal/analysisinternal" + "golang.org/x/tools/internal/analysis/analyzerutil" + "golang.org/x/tools/internal/astutil" "golang.org/x/tools/internal/typesinternal" ) @@ -25,7 +25,7 @@ var doc string var Analyzer = &analysis.Analyzer{ Name: "tests", - Doc: analysisutil.MustExtractDoc(doc, "tests"), + Doc: analyzerutil.MustExtractDoc(doc, "tests"), URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/tests", Run: run, } @@ -465,7 +465,7 @@ func checkTest(pass *analysis.Pass, fn *ast.FuncDecl, prefix string) { if tparams := fn.Type.TypeParams; tparams != nil && len(tparams.List) > 0 { // Note: cmd/go/internal/load also errors about TestXXX and BenchmarkXXX functions with type parameters. // We have currently decided to also warn before compilation/package loading. This can help users in IDEs.
- pass.ReportRangef(analysisinternal.Range(tparams.Opening, tparams.Closing), + pass.ReportRangef(astutil.RangeOf(tparams.Opening, tparams.Closing), "%s has type parameters: it will not be run by go test as a %sXXX function", fn.Name.Name, prefix) } diff --git a/vendor/golang.org/x/tools/go/analysis/passes/timeformat/timeformat.go b/vendor/golang.org/x/tools/go/analysis/passes/timeformat/timeformat.go index db91d37c12..8353c1efa9 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/timeformat/timeformat.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/timeformat/timeformat.go @@ -16,9 +16,9 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" - "golang.org/x/tools/go/analysis/passes/internal/analysisutil" "golang.org/x/tools/go/ast/inspector" "golang.org/x/tools/go/types/typeutil" + "golang.org/x/tools/internal/analysis/analyzerutil" "golang.org/x/tools/internal/typesinternal" ) @@ -30,7 +30,7 @@ var doc string var Analyzer = &analysis.Analyzer{ Name: "timeformat", - Doc: analysisutil.MustExtractDoc(doc, "timeformat"), + Doc: analyzerutil.MustExtractDoc(doc, "timeformat"), URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/timeformat", Requires: []*analysis.Analyzer{inspect.Analyzer}, Run: run, @@ -39,7 +39,7 @@ var Analyzer = &analysis.Analyzer{ func run(pass *analysis.Pass) (any, error) { // Note: (time.Time).Format is a method and can be a typeutil.Callee // without directly importing "time". So we cannot just skip this package - // when !analysisutil.Imports(pass.Pkg, "time"). + // when !typesinternal.Imports(pass.Pkg, "time"). // TODO(taking): Consider using a prepass to collect typeutil.Callees. inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) diff --git a/vendor/golang.org/x/tools/go/analysis/passes/unmarshal/unmarshal.go b/vendor/golang.org/x/tools/go/analysis/passes/unmarshal/unmarshal.go index 26e894bd40..38eb0b1063 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/unmarshal/unmarshal.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/unmarshal/unmarshal.go @@ -11,9 +11,9 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" - "golang.org/x/tools/go/analysis/passes/internal/analysisutil" "golang.org/x/tools/go/ast/inspector" "golang.org/x/tools/go/types/typeutil" + "golang.org/x/tools/internal/analysis/analyzerutil" "golang.org/x/tools/internal/typesinternal" ) @@ -22,7 +22,7 @@ var doc string var Analyzer = &analysis.Analyzer{ Name: "unmarshal", - Doc: analysisutil.MustExtractDoc(doc, "unmarshal"), + Doc: analyzerutil.MustExtractDoc(doc, "unmarshal"), URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/unmarshal", Requires: []*analysis.Analyzer{inspect.Analyzer}, Run: run, @@ -39,7 +39,7 @@ func run(pass *analysis.Pass) (any, error) { // Note: (*"encoding/json".Decoder).Decode, (* "encoding/gob".Decoder).Decode // and (* "encoding/xml".Decoder).Decode are methods and can be a typeutil.Callee // without directly importing their packages. So we cannot just skip this package - // when !analysisutil.Imports(pass.Pkg, "encoding/..."). + // when !typesinternal.Imports(pass.Pkg, "encoding/..."). // TODO(taking): Consider using a prepass to collect typeutil.Callees. inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) @@ -57,7 +57,7 @@ func run(pass *analysis.Pass) (any, error) { // Classify the callee (without allocating memory).
argidx := -1 - recv := fn.Type().(*types.Signature).Recv() + recv := fn.Signature().Recv() if fn.Name() == "Unmarshal" && recv == nil { // "encoding/json".Unmarshal // "encoding/xml".Unmarshal diff --git a/vendor/golang.org/x/tools/go/analysis/passes/unreachable/unreachable.go b/vendor/golang.org/x/tools/go/analysis/passes/unreachable/unreachable.go index 317f034992..532f38fe91 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/unreachable/unreachable.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/unreachable/unreachable.go @@ -14,8 +14,9 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" - "golang.org/x/tools/go/analysis/passes/internal/analysisutil" "golang.org/x/tools/go/ast/inspector" + "golang.org/x/tools/internal/analysis/analyzerutil" + "golang.org/x/tools/internal/refactor" ) //go:embed doc.go @@ -23,7 +24,7 @@ var doc string var Analyzer = &analysis.Analyzer{ Name: "unreachable", - Doc: analysisutil.MustExtractDoc(doc, "unreachable"), + Doc: analyzerutil.MustExtractDoc(doc, "unreachable"), URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/unreachable", Requires: []*analysis.Analyzer{inspect.Analyzer}, RunDespiteErrors: true, @@ -188,6 +189,11 @@ func (d *deadState) findDead(stmt ast.Stmt) { case *ast.EmptyStmt: // do not warn about unreachable empty statements default: + var ( + inspect = d.pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + curStmt, _ = inspect.Root().FindNode(stmt) + tokFile = d.pass.Fset.File(stmt.Pos()) + ) // (This call to pass.Report is a frequent source // of diagnostics beyond EOF in a truncated file; // see #71659.) @@ -196,11 +202,8 @@ func (d *deadState) findDead(stmt ast.Stmt) { End: stmt.End(), Message: "unreachable code", SuggestedFixes: []analysis.SuggestedFix{{ - Message: "Remove", - TextEdits: []analysis.TextEdit{{ - Pos: stmt.Pos(), - End: stmt.End(), - }}, + Message: "Remove", + TextEdits: refactor.DeleteStmt(tokFile, curStmt), }}, }) d.reachable = true // silence error about next statement diff --git a/vendor/golang.org/x/tools/go/analysis/passes/unsafeptr/unsafeptr.go b/vendor/golang.org/x/tools/go/analysis/passes/unsafeptr/unsafeptr.go index 778010bc0d..ce785725e3 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/unsafeptr/unsafeptr.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/unsafeptr/unsafeptr.go @@ -14,8 +14,8 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" - "golang.org/x/tools/go/analysis/passes/internal/analysisutil" "golang.org/x/tools/go/ast/inspector" + "golang.org/x/tools/internal/analysis/analyzerutil" "golang.org/x/tools/internal/typesinternal" ) @@ -24,7 +24,7 @@ var doc string var Analyzer = &analysis.Analyzer{ Name: "unsafeptr", - Doc: analysisutil.MustExtractDoc(doc, "unsafeptr"), + Doc: analyzerutil.MustExtractDoc(doc, "unsafeptr"), URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/unsafeptr", Requires: []*analysis.Analyzer{inspect.Analyzer}, Run: run, diff --git a/vendor/golang.org/x/tools/go/analysis/passes/unusedresult/unusedresult.go b/vendor/golang.org/x/tools/go/analysis/passes/unusedresult/unusedresult.go index ed4cf7ae0b..bd32d58690 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/unusedresult/unusedresult.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/unusedresult/unusedresult.go @@ -23,10 +23,10 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" - "golang.org/x/tools/go/analysis/passes/internal/analysisutil" 
"golang.org/x/tools/go/ast/inspector" "golang.org/x/tools/go/types/typeutil" - "golang.org/x/tools/internal/analysisinternal" + "golang.org/x/tools/internal/analysis/analyzerutil" + "golang.org/x/tools/internal/astutil" ) //go:embed doc.go @@ -34,7 +34,7 @@ var doc string var Analyzer = &analysis.Analyzer{ Name: "unusedresult", - Doc: analysisutil.MustExtractDoc(doc, "unusedresult"), + Doc: analyzerutil.MustExtractDoc(doc, "unusedresult"), URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/unusedresult", Requires: []*analysis.Analyzer{inspect.Analyzer}, Run: run, @@ -150,11 +150,11 @@ func run(pass *analysis.Pass) (any, error) { if !ok { return // e.g. var or builtin } - if sig := fn.Type().(*types.Signature); sig.Recv() != nil { + if sig := fn.Signature(); sig.Recv() != nil { // method (e.g. foo.String()) if types.Identical(sig, sigNoArgsStringResult) { if stringMethods[fn.Name()] { - pass.ReportRangef(analysisinternal.Range(call.Pos(), call.Lparen), + pass.ReportRangef(astutil.RangeOf(call.Pos(), call.Lparen), "result of (%s).%s call not used", sig.Recv().Type(), fn.Name()) } @@ -162,7 +162,7 @@ func run(pass *analysis.Pass) (any, error) { } else { // package-level function (e.g. fmt.Errorf) if pkgFuncs[[2]string{fn.Pkg().Path(), fn.Name()}] { - pass.ReportRangef(analysisinternal.Range(call.Pos(), call.Lparen), + pass.ReportRangef(astutil.RangeOf(call.Pos(), call.Lparen), "result of %s.%s call not used", fn.Pkg().Path(), fn.Name()) } diff --git a/vendor/golang.org/x/tools/go/analysis/passes/unusedwrite/unusedwrite.go b/vendor/golang.org/x/tools/go/analysis/passes/unusedwrite/unusedwrite.go index 2e209c8a6c..9bf9f5455f 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/unusedwrite/unusedwrite.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/unusedwrite/unusedwrite.go @@ -10,8 +10,8 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/buildssa" - "golang.org/x/tools/go/analysis/passes/internal/analysisutil" "golang.org/x/tools/go/ssa" + "golang.org/x/tools/internal/analysis/analyzerutil" "golang.org/x/tools/internal/typeparams" ) @@ -22,7 +22,7 @@ var doc string // that are never read. 
var Analyzer = &analysis.Analyzer{ Name: "unusedwrite", - Doc: analysisutil.MustExtractDoc(doc, "unusedwrite"), + Doc: analyzerutil.MustExtractDoc(doc, "unusedwrite"), URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/unusedwrite", Requires: []*analysis.Analyzer{buildssa.Analyzer}, Run: run, diff --git a/vendor/golang.org/x/tools/go/analysis/passes/waitgroup/waitgroup.go b/vendor/golang.org/x/tools/go/analysis/passes/waitgroup/waitgroup.go index 5ed1814f77..c2e20521e9 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/waitgroup/waitgroup.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/waitgroup/waitgroup.go @@ -13,9 +13,9 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" - "golang.org/x/tools/go/analysis/passes/internal/analysisutil" "golang.org/x/tools/go/ast/inspector" "golang.org/x/tools/go/types/typeutil" + "golang.org/x/tools/internal/analysis/analyzerutil" "golang.org/x/tools/internal/typesinternal" ) @@ -24,7 +24,7 @@ var doc string var Analyzer = &analysis.Analyzer{ Name: "waitgroup", - Doc: analysisutil.MustExtractDoc(doc, "waitgroup"), + Doc: analyzerutil.MustExtractDoc(doc, "waitgroup"), URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/waitgroup", Requires: []*analysis.Analyzer{inspect.Analyzer}, Run: run, diff --git a/vendor/golang.org/x/tools/go/ast/inspector/cursor.go b/vendor/golang.org/x/tools/go/ast/inspector/cursor.go index 7e72d3c284..fc9bbc714c 100644 --- a/vendor/golang.org/x/tools/go/ast/inspector/cursor.go +++ b/vendor/golang.org/x/tools/go/ast/inspector/cursor.go @@ -467,7 +467,9 @@ func (c Cursor) FindByPos(start, end token.Pos) (Cursor, bool) { // This algorithm could be implemented using c.Inspect, // but it is about 2.5x slower. - best := int32(-1) // push index of latest (=innermost) node containing range + // best is the push-index of the latest (=innermost) node containing range. + // (Beware: latest is not always innermost because FuncDecl.{Name,Type} overlap.) + best := int32(-1) for i, limit := c.indices(); i < limit; i++ { ev := events[i] if ev.index > i { // push? @@ -481,6 +483,19 @@ func (c Cursor) FindByPos(start, end token.Pos) (Cursor, bool) { continue } } else { + // Edge case: FuncDecl.Name and .Type overlap: + // Don't update best from Name to FuncDecl.Type. + // + // The condition can be read as: + // - n is FuncType + // - n.parent is FuncDecl + // - best is strictly beneath the FuncDecl + if ev.typ == 1<<nFuncType && + events[ev.parent].typ == 1<<nFuncDecl && + best > ev.parent { + continue + } + nodeEnd = n.End() if n.Pos() > start { break // disjoint, after; stop diff --git a/vendor/golang.org/x/tools/go/cfg/builder.go b/vendor/golang.org/x/tools/go/cfg/builder.go index ac4d63c400..f16cd42309 100644 --- a/vendor/golang.org/x/tools/go/cfg/builder.go +++ b/vendor/golang.org/x/tools/go/cfg/builder.go @@ -13,7 +13,7 @@ import ( ) type builder struct { - cfg *CFG + blocks []*Block mayReturn func(*ast.CallExpr) bool current *Block lblocks map[string]*lblock // labeled blocks @@ -32,12 +32,18 @@ start: *ast.SendStmt, *ast.IncDecStmt, *ast.GoStmt, - *ast.DeferStmt, *ast.EmptyStmt, *ast.AssignStmt: // No effect on control flow. b.add(s) + case *ast.DeferStmt: + b.add(s) + // Assume conservatively that this behaves like: + // defer func() { recover() } + // so any subsequent panic may act like a return.
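The conservative handling of defer above (recorded by the returns flag set on the next line) exists because a deferred call can recover from a panic and assign the enclosing function's named results, so control can leave the function normally even though no return statement is reachable. The cfg.go hunk below documents the same point; as a runnable illustration:

package main

import "fmt"

// f contains no return statement, explicit or implied, yet it returns
// normally with a nonzero value: the deferred closure recovers the
// panic and stores the recovered value in the named result res.
func f() (res any) {
	defer func() { res = recover() }()
	panic(123)
}

func main() {
	fmt.Println(f()) // prints 123
}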
+ b.current.returns = true + case *ast.ExprStmt: b.add(s) if call, ok := s.X.(*ast.CallExpr); ok && !b.mayReturn(call) { @@ -64,6 +70,7 @@ start: goto start // effectively: tailcall stmt(g, s.Stmt, label) case *ast.ReturnStmt: + b.current.returns = true b.add(s) b.current = b.newBlock(KindUnreachable, s) @@ -483,14 +490,13 @@ func (b *builder) labeledBlock(label *ast.Ident, stmt *ast.LabeledStmt) *lblock // It does not automatically become the current block. // comment is an optional string for more readable debugging output. func (b *builder) newBlock(kind BlockKind, stmt ast.Stmt) *Block { - g := b.cfg block := &Block{ - Index: int32(len(g.Blocks)), + Index: int32(len(b.blocks)), Kind: kind, Stmt: stmt, } block.Succs = block.succs2[:0] - g.Blocks = append(g.Blocks, block) + b.blocks = append(b.blocks, block) return block } diff --git a/vendor/golang.org/x/tools/go/cfg/cfg.go b/vendor/golang.org/x/tools/go/cfg/cfg.go index 29a39f698c..38aba77c29 100644 --- a/vendor/golang.org/x/tools/go/cfg/cfg.go +++ b/vendor/golang.org/x/tools/go/cfg/cfg.go @@ -47,13 +47,16 @@ import ( "go/ast" "go/format" "go/token" + + "golang.org/x/tools/internal/cfginternal" ) // A CFG represents the control-flow graph of a single function. // // The entry point is Blocks[0]; there may be multiple return blocks. type CFG struct { - Blocks []*Block // block[0] is entry; order otherwise undefined + Blocks []*Block // block[0] is entry; order otherwise undefined + noreturn bool // function body lacks a reachable return statement } // A Block represents a basic block: a list of statements and @@ -67,12 +70,13 @@ type CFG struct { // an [ast.Expr], Succs[0] is the successor if the condition is true, and // Succs[1] is the successor if the condition is false. type Block struct { - Nodes []ast.Node // statements, expressions, and ValueSpecs - Succs []*Block // successor nodes in the graph - Index int32 // index within CFG.Blocks - Live bool // block is reachable from entry - Kind BlockKind // block kind - Stmt ast.Stmt // statement that gave rise to this block (see BlockKind for details) + Nodes []ast.Node // statements, expressions, and ValueSpecs + Succs []*Block // successor nodes in the graph + Index int32 // index within CFG.Blocks + Live bool // block is reachable from entry + returns bool // block contains return or defer (which may recover and return) + Kind BlockKind // block kind + Stmt ast.Stmt // statement that gave rise to this block (see BlockKind for details) succs2 [2]*Block // underlying array for Succs } @@ -141,14 +145,14 @@ func (kind BlockKind) String() string { func New(body *ast.BlockStmt, mayReturn func(*ast.CallExpr) bool) *CFG { b := builder{ mayReturn: mayReturn, - cfg: new(CFG), } b.current = b.newBlock(KindBody, body) b.stmt(body) - // Compute liveness (reachability from entry point), breadth-first. - q := make([]*Block, 0, len(b.cfg.Blocks)) - q = append(q, b.cfg.Blocks[0]) // entry point + // Compute liveness (reachability from entry point), + // breadth-first, marking Block.Live flags. + q := make([]*Block, 0, len(b.blocks)) + q = append(q, b.blocks[0]) // entry point for len(q) > 0 { b := q[len(q)-1] q = q[:len(q)-1] @@ -162,12 +166,30 @@ func New(body *ast.BlockStmt, mayReturn func(*ast.CallExpr) bool) *CFG { // Does control fall off the end of the function's body? // Make implicit return explicit. if b.current != nil && b.current.Live { + b.current.returns = true b.add(&ast.ReturnStmt{ Return: body.End() - 1, }) } - return b.cfg + // Is any return (or defer+recover) block reachable? 
+ noreturn := true + for _, bl := range b.blocks { + if bl.Live && bl.returns { + noreturn = false + break + } + } + + return &CFG{Blocks: b.blocks, noreturn: noreturn} +} + +// isNoReturn reports whether the function has no reachable return. +// TODO(adonovan): add (*CFG).NoReturn to public API. +func isNoReturn(_cfg any) bool { return _cfg.(*CFG).noreturn } + +func init() { + cfginternal.IsNoReturn = isNoReturn // expose to ctrlflow analyzer } func (b *Block) String() string { @@ -187,6 +209,14 @@ func (b *Block) comment(fset *token.FileSet) string { // // When control falls off the end of the function, the ReturnStmt is synthetic // and its [ast.Node.End] position may be beyond the end of the file. +// +// A function that contains no return statement (explicit or implied) +// may yet return normally, and may even return a nonzero value. For example: +// +// func() (res any) { +// defer func() { res = recover() }() +// panic(123) +// } func (b *Block) Return() (ret *ast.ReturnStmt) { if len(b.Nodes) > 0 { ret, _ = b.Nodes[len(b.Nodes)-1].(*ast.ReturnStmt) diff --git a/vendor/golang.org/x/tools/go/packages/visit.go b/vendor/golang.org/x/tools/go/packages/visit.go index af6a60d75f..c546b1b63e 100644 --- a/vendor/golang.org/x/tools/go/packages/visit.go +++ b/vendor/golang.org/x/tools/go/packages/visit.go @@ -78,7 +78,7 @@ func PrintErrors(pkgs []*Package) int { return n } -// Postorder returns an iterator over the the packages in +// Postorder returns an iterator over the packages in // the import graph whose roots are pkg. // Packages are enumerated in dependencies-first order. func Postorder(pkgs []*Package) iter.Seq[*Package] { diff --git a/vendor/golang.org/x/tools/go/ssa/builder.go b/vendor/golang.org/x/tools/go/ssa/builder.go index 41857ffbb9..a75257c8b1 100644 --- a/vendor/golang.org/x/tools/go/ssa/builder.go +++ b/vendor/golang.org/x/tools/go/ssa/builder.go @@ -110,10 +110,11 @@ var ( tEface = types.NewInterfaceType(nil, nil).Complete() // SSA Value constants. - vZero = intConst(0) - vOne = intConst(1) - vTrue = NewConst(constant.MakeBool(true), tBool) - vFalse = NewConst(constant.MakeBool(false), tBool) + vZero = intConst(0) + vOne = intConst(1) + vTrue = NewConst(constant.MakeBool(true), tBool) + vFalse = NewConst(constant.MakeBool(false), tBool) + vNoReturn = NewConst(constant.MakeString("noreturn"), tString) jReady = intConst(0) // range-over-func jump is READY jBusy = intConst(-1) // range-over-func jump is BUSY @@ -291,7 +292,7 @@ func (b *builder) exprN(fn *Function, e ast.Expr) Value { var c Call b.setCall(fn, e, &c.Call) c.typ = typ - return fn.emit(&c) + return emitCall(fn, &c) case *ast.IndexExpr: mapt := typeparams.CoreType(fn.typeOf(e.X)).(*types.Map) // ,ok must be a map. @@ -723,7 +724,7 @@ func (b *builder) expr0(fn *Function, e ast.Expr, tv types.TypeAndValue) Value { var v Call b.setCall(fn, e, &v.Call) v.setType(fn.typ(tv.Type)) - return fn.emit(&v) + return emitCall(fn, &v) case *ast.UnaryExpr: switch e.Op { @@ -2343,7 +2344,7 @@ func (b *builder) rangeStmt(fn *Function, s *ast.RangeStmt, label *lblock) { // for x := range f { ... } // into // f(func(x T) bool { ... }) - b.rangeFunc(fn, x, tk, tv, s, label) + b.rangeFunc(fn, x, s, label) return default: @@ -2389,7 +2390,7 @@ func (b *builder) rangeStmt(fn *Function, s *ast.RangeStmt, label *lblock) { // rangeFunc emits to fn code for the range-over-func rng.Body of the iterator // function x, optionally labelled by label. It creates a new anonymous function // yield for rng and builds the function. 
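The rangeFunc doc comment above (its signature change follows) describes the range-over-func lowering. In source terms the transformation looks roughly like the sketch below; seq and the loop body are hypothetical, and the builder of course emits SSA rather than rewritten Go source:

package main

import "fmt"

// seq is an iterator in the range-over-func style: it calls yield once
// per element and stops early if yield returns false.
func seq(yield func(int) bool) {
	_ = yield(1) && yield(2) && yield(3)
}

func main() {
	// The builder treats this loop...
	for x := range seq {
		fmt.Println(x)
	}
	// ...roughly as this call, in which the closure plays the role of
	// the synthesized "yield" function mentioned in the comment:
	seq(func(x int) bool { fmt.Println(x); return true })
}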
-func (b *builder) rangeFunc(fn *Function, x Value, tk, tv types.Type, rng *ast.RangeStmt, label *lblock) { +func (b *builder) rangeFunc(fn *Function, x Value, rng *ast.RangeStmt, label *lblock) { // Consider the SSA code for the outermost range-over-func in fn: // // func fn(...) (ret R) { @@ -2993,8 +2994,8 @@ func (b *builder) buildYieldFunc(fn *Function) { fn.source = fn.parent.source fn.startBody() params := fn.Signature.Params() - for i := 0; i < params.Len(); i++ { - fn.addParamVar(params.At(i)) + for v := range params.Variables() { + fn.addParamVar(v) } // Initial targets diff --git a/vendor/golang.org/x/tools/go/ssa/create.go b/vendor/golang.org/x/tools/go/ssa/create.go index 2fa3d0757a..a8778788ab 100644 --- a/vendor/golang.org/x/tools/go/ssa/create.go +++ b/vendor/golang.org/x/tools/go/ssa/create.go @@ -15,6 +15,7 @@ import ( "os" "sync" + "golang.org/x/tools/internal/ssainternal" "golang.org/x/tools/internal/versions" ) @@ -312,3 +313,20 @@ func (prog *Program) AllPackages() []*Package { func (prog *Program) ImportedPackage(path string) *Package { return prog.imported[path] } + +// setNoReturns sets the predicate used by the SSA builder to decide +// whether a call to the specified named function cannot return, +// allowing the builder to prune control-flow edges following the +// call, thus improving the precision of downstream analysis. +// +// TODO(adonovan): add (*Program).SetNoReturn to the public API. +func (prog *Program) setNoReturn(noReturn func(*types.Func) bool) { + prog.noReturn = noReturn +} + +func init() { + // SetNoReturn exposes Program.setNoReturn to the buildssa analyzer. + ssainternal.SetNoReturn = func(prog any, noReturn func(*types.Func) bool) { + prog.(*Program).setNoReturn(noReturn) + } +} diff --git a/vendor/golang.org/x/tools/go/ssa/emit.go b/vendor/golang.org/x/tools/go/ssa/emit.go index e53ebf5a7f..31aa5de8d7 100644 --- a/vendor/golang.org/x/tools/go/ssa/emit.go +++ b/vendor/golang.org/x/tools/go/ssa/emit.go @@ -488,7 +488,7 @@ func emitTailCall(f *Function, call *Call) { } else { call.typ = tresults } - tuple := f.emit(call) + tuple := emitCall(f, call) var ret Return switch nr { case 0: @@ -509,6 +509,27 @@ func emitTailCall(f *Function, call *Call) { f.currentBlock = nil } +// emitCall emits a call instruction. If the callee is "no return", +// it also emits a panic to eliminate infeasible CFG edges. +func emitCall(fn *Function, call *Call) Value { + res := fn.emit(call) + + callee := call.Call.StaticCallee() + if callee != nil && + callee.object != nil && + fn.Prog.noReturn != nil && + fn.Prog.noReturn(callee.object) { + // Call cannot return. Insert a panic after it. + fn.emit(&Panic{ + X: emitConv(fn, vNoReturn, tEface), + pos: call.Pos(), + }) + fn.currentBlock = fn.newBasicBlock("unreachable.noreturn") + } + + return res +} + // emitImplicitSelections emits to f code to apply the sequence of // implicit field selections specified by indices to base value v, and // returns the selected value. 
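Seen from the source side, the emitCall change above means that once a driver installs a noReturn predicate (via the setNoReturn hook in create.go above), any statically resolved call to a function the predicate flags is immediately followed by a synthetic Panic instruction, so blocks reachable only if the call returned are pruned. A sketch of the kind of function such a predicate would flag; fatal here is hypothetical, and the predicate itself would be supplied by the driver:

package main

import "os"

// fatal never returns; a noReturn predicate would identify it.
func fatal(msg string) {
	println(msg)
	os.Exit(1)
}

func handle(x int, err error) int {
	if err != nil {
		fatal(err.Error())
		// With the predicate installed, the SSA builder inserts a Panic
		// here, so no infeasible edge flows from the error path to the
		// return below, sharpening downstream analyses.
	}
	return x
}

func main() { println(handle(1, nil)) }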
diff --git a/vendor/golang.org/x/tools/go/ssa/func.go b/vendor/golang.org/x/tools/go/ssa/func.go index f48bd7184a..33a12444d5 100644 --- a/vendor/golang.org/x/tools/go/ssa/func.go +++ b/vendor/golang.org/x/tools/go/ssa/func.go @@ -668,7 +668,11 @@ func WriteFunction(buf *bytes.Buffer, f *Function) { continue } n, _ := fmt.Fprintf(buf, "%d:", b.Index) + // (|predecessors|, |successors|, immediate dominator) bmsg := fmt.Sprintf("%s P:%d S:%d", b.Comment, len(b.Preds), len(b.Succs)) + if b.Idom() != nil { + bmsg = fmt.Sprintf("%s idom:%d", bmsg, b.Idom().Index) + } fmt.Fprintf(buf, "%*s%s\n", punchcard-1-n-len(bmsg), "", bmsg) if false { // CFG debugging diff --git a/vendor/golang.org/x/tools/go/ssa/instantiate.go b/vendor/golang.org/x/tools/go/ssa/instantiate.go index 20a0986e6d..5862440a65 100644 --- a/vendor/golang.org/x/tools/go/ssa/instantiate.go +++ b/vendor/golang.org/x/tools/go/ssa/instantiate.go @@ -83,7 +83,7 @@ func createInstance(fn *Function, targs []types.Type) *Function { if prog.mode&InstantiateGenerics != 0 && !prog.isParameterized(targs...) { synthetic = fmt.Sprintf("instance of %s", fn.Name()) if fn.syntax != nil { - subst = makeSubster(prog.ctxt, obj, fn.typeparams, targs, false) + subst = makeSubster(prog.ctxt, obj, fn.typeparams, targs) build = (*builder).buildFromSyntax } else { build = (*builder).buildParamsOnly diff --git a/vendor/golang.org/x/tools/go/ssa/ssa.go b/vendor/golang.org/x/tools/go/ssa/ssa.go index ecad99d034..7c84494c32 100644 --- a/vendor/golang.org/x/tools/go/ssa/ssa.go +++ b/vendor/golang.org/x/tools/go/ssa/ssa.go @@ -45,6 +45,8 @@ type Program struct { // to avoid creation of duplicate methods from type information. objectMethodsMu sync.Mutex objectMethods map[*types.Func]*Function + + noReturn func(*types.Func) bool // (optional) predicate that decides whether a given call cannot return } // A Package is a single analyzed Go package containing Members for diff --git a/vendor/golang.org/x/tools/go/ssa/ssautil/visit.go b/vendor/golang.org/x/tools/go/ssa/ssautil/visit.go index b4feb42cb3..7300d2bf37 100644 --- a/vendor/golang.org/x/tools/go/ssa/ssautil/visit.go +++ b/vendor/golang.org/x/tools/go/ssa/ssautil/visit.go @@ -74,8 +74,8 @@ func AllFunctions(prog *ssa.Program) map[*ssa.Function]bool { methodsOf := func(T types.Type) { if !types.IsInterface(T) { mset := prog.MethodSets.MethodSet(T) - for i := 0; i < mset.Len(); i++ { - function(prog.MethodValue(mset.At(i))) + for method := range mset.Methods() { + function(prog.MethodValue(method)) } } } diff --git a/vendor/golang.org/x/tools/go/ssa/subst.go b/vendor/golang.org/x/tools/go/ssa/subst.go index 2c465ec0ae..5799a07802 100644 --- a/vendor/golang.org/x/tools/go/ssa/subst.go +++ b/vendor/golang.org/x/tools/go/ssa/subst.go @@ -59,7 +59,7 @@ type subster struct { // Returns a subster that replaces tparams[i] with targs[i]. Uses ctxt as a cache. // targs should not contain any types in tparams. // fn is the generic function for which we are substituting. -func makeSubster(ctxt *types.Context, fn *types.Func, tparams *types.TypeParamList, targs []types.Type, debug bool) *subster { +func makeSubster(ctxt *types.Context, fn *types.Func, tparams *types.TypeParamList, targs []types.Type) *subster { assert(tparams.Len() == len(targs), "makeSubster argument count must match") subst := &subster{ @@ -352,8 +352,7 @@ func (subst *subster) alias(t *types.Alias) types.Type { // Copy and substitute type params. 
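The loop rewrites in the subst.go hunks below (and in ssautil, typeutil, and iexport elsewhere in this diff) adopt the iterator methods that go/types gained in Go 1.24, such as TypeParamList.TypeParams, Tuple.Variables, MethodSet.Methods, and TypeList.Types. A self-contained example of the Tuple form:

package main

import (
	"fmt"
	"go/token"
	"go/types"
)

func main() {
	params := types.NewTuple(
		types.NewVar(token.NoPos, nil, "x", types.Typ[types.Int]),
		types.NewVar(token.NoPos, nil, "s", types.Typ[types.String]),
	)
	// The Go 1.24 iterator methods replace index-based loops of the
	// form: for i := 0; i < params.Len(); i++ { v := params.At(i) ... }
	for v := range params.Variables() {
		fmt.Println(v.Name(), v.Type())
	}
}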
var newTParams []*types.TypeParam - for i := 0; i < tparams.Len(); i++ { - cur := tparams.At(i) + for cur := range tparams.TypeParams() { cobj := cur.Obj() cname := types.NewTypeName(cobj.Pos(), cobj.Pkg(), cobj.Name(), nil) ntp := types.NewTypeParam(cname, nil) @@ -488,8 +487,7 @@ func (subst *subster) named(t *types.Named) types.Type { obj := types.NewTypeName(tname.Pos(), tname.Pkg(), tname.Name(), nil) fresh := types.NewNamed(obj, nil, nil) var newTParams []*types.TypeParam - for i := 0; i < tparams.Len(); i++ { - cur := tparams.At(i) + for cur := range tparams.TypeParams() { cobj := cur.Obj() cname := types.NewTypeName(cobj.Pos(), cobj.Pkg(), cobj.Name(), nil) ntp := types.NewTypeParam(cname, nil) diff --git a/vendor/golang.org/x/tools/go/ssa/util.go b/vendor/golang.org/x/tools/go/ssa/util.go index 932eb6cb0e..42f9621c3f 100644 --- a/vendor/golang.org/x/tools/go/ssa/util.go +++ b/vendor/golang.org/x/tools/go/ssa/util.go @@ -121,7 +121,7 @@ func is[T any](x any) bool { // recvType returns the receiver type of method obj. func recvType(obj *types.Func) types.Type { - return obj.Type().(*types.Signature).Recv().Type() + return obj.Signature().Recv().Type() } // fieldOf returns the index'th field of the (core type of) a struct type; @@ -200,7 +200,7 @@ func makeLen(T types.Type) *Builtin { // receiverTypeArgs returns the type arguments to a method's receiver. // Returns an empty list if the receiver does not have type arguments. func receiverTypeArgs(method *types.Func) []types.Type { - recv := method.Type().(*types.Signature).Recv() + recv := method.Signature().Recv() _, named := typesinternal.ReceiverNamed(recv) if named == nil { return nil // recv is anonymous struct/interface @@ -221,8 +221,8 @@ func receiverTypeArgs(method *types.Func) []types.Type { func recvAsFirstArg(sig *types.Signature) *types.Signature { params := make([]*types.Var, 0, 1+sig.Params().Len()) params = append(params, sig.Recv()) - for i := 0; i < sig.Params().Len(); i++ { - params = append(params, sig.Params().At(i)) + for v := range sig.Params().Variables() { + params = append(params, v) } return types.NewSignatureType(nil, nil, nil, types.NewTuple(params...), sig.Results(), sig.Variadic()) } diff --git a/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go b/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go index 6c0c74968f..6646bf5508 100644 --- a/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go +++ b/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go @@ -249,7 +249,7 @@ func (enc *Encoder) For(obj types.Object) (Path, error) { case *types.Func: // A func, if not package-level, must be a method. 
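The receiver rewrites here and in the objectpath hunk just below rely on (*types.Func).Signature, added in Go 1.23 as a typed shorthand for the fn.Type().(*types.Signature) assertion that this diff removes throughout. The two forms yield the identical *types.Signature:

package main

import (
	"fmt"
	"go/token"
	"go/types"
)

func main() {
	sig := types.NewSignatureType(nil, nil, nil, nil, nil, false)
	fn := types.NewFunc(token.NoPos, nil, "f", sig)
	// Signature returns the same *types.Signature that the type
	// assertion would, without the assertion boilerplate.
	fmt.Println(fn.Signature() == fn.Type().(*types.Signature)) // true
}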
- if recv := obj.Type().(*types.Signature).Recv(); recv == nil { + if recv := obj.Signature().Recv(); recv == nil { return "", fmt.Errorf("func is not a method: %v", obj) } @@ -405,7 +405,7 @@ func (enc *Encoder) concreteMethod(meth *types.Func) (Path, bool) { return "", false } - _, named := typesinternal.ReceiverNamed(meth.Type().(*types.Signature).Recv()) + _, named := typesinternal.ReceiverNamed(meth.Signature().Recv()) if named == nil { return "", false } diff --git a/vendor/golang.org/x/tools/go/types/typeutil/map.go b/vendor/golang.org/x/tools/go/types/typeutil/map.go index f035a0b6be..36624572a6 100644 --- a/vendor/golang.org/x/tools/go/types/typeutil/map.go +++ b/vendor/golang.org/x/tools/go/types/typeutil/map.go @@ -304,8 +304,7 @@ func (h hasher) hash(t types.Type) uint32 { case *types.Named: hash := h.hashTypeName(t.Obj()) targs := t.TypeArgs() - for i := 0; i < targs.Len(); i++ { - targ := targs.At(i) + for targ := range targs.Types() { hash += 2 * h.hash(targ) } return hash diff --git a/vendor/golang.org/x/tools/imports/forward.go b/vendor/golang.org/x/tools/imports/forward.go index 22ae777726..5d120d077c 100644 --- a/vendor/golang.org/x/tools/imports/forward.go +++ b/vendor/golang.org/x/tools/imports/forward.go @@ -69,3 +69,9 @@ func Process(filename string, src []byte, opt *Options) ([]byte, error) { } return intimp.Process(filename, src, intopt) } + +// VendorlessPath returns the devendorized version of the import path ipath. +// For example, VendorlessPath("foo/bar/vendor/a/b") returns "a/b". +func VendorlessPath(ipath string) string { + return intimp.VendorlessPath(ipath) +} diff --git a/vendor/golang.org/x/tools/internal/analysis/analyzerutil/doc.go b/vendor/golang.org/x/tools/internal/analysis/analyzerutil/doc.go new file mode 100644 index 0000000000..74a2a1c815 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/analysis/analyzerutil/doc.go @@ -0,0 +1,6 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package analyzerutil provides implementation helpers for analyzers. +package analyzerutil diff --git a/vendor/golang.org/x/tools/internal/analysisinternal/extractdoc.go b/vendor/golang.org/x/tools/internal/analysis/analyzerutil/extractdoc.go similarity index 97% rename from vendor/golang.org/x/tools/internal/analysisinternal/extractdoc.go rename to vendor/golang.org/x/tools/internal/analysis/analyzerutil/extractdoc.go index bfb5900f1b..772a0300da 100644 --- a/vendor/golang.org/x/tools/internal/analysisinternal/extractdoc.go +++ b/vendor/golang.org/x/tools/internal/analysis/analyzerutil/extractdoc.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -package analysisinternal +package analyzerutil import ( "fmt" @@ -35,7 +35,7 @@ import ( // // var Analyzer = &analysis.Analyzer{ // Name: "halting", -// Doc: analysisutil.MustExtractDoc(doc, "halting"), +// Doc: analyzerutil.MustExtractDoc(doc, "halting"), // ... // } func MustExtractDoc(content, name string) string { diff --git a/vendor/golang.org/x/tools/internal/analysis/analyzerutil/readfile.go b/vendor/golang.org/x/tools/internal/analysis/analyzerutil/readfile.go new file mode 100644 index 0000000000..ecc30cae04 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/analysis/analyzerutil/readfile.go @@ -0,0 +1,30 @@ +// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package analyzerutil + +// This file defines helpers for calling [analysis.Pass.ReadFile]. + +import ( + "go/token" + "os" + + "golang.org/x/tools/go/analysis" +) + +// ReadFile reads a file and adds it to the FileSet in pass +// so that we can report errors against it using lineStart. +func ReadFile(pass *analysis.Pass, filename string) ([]byte, *token.File, error) { + readFile := pass.ReadFile + if readFile == nil { + readFile = os.ReadFile + } + content, err := readFile(filename) + if err != nil { + return nil, nil, err + } + tf := pass.Fset.AddFile(filename, -1, len(content)) + tf.SetLinesForContent(content) + return content, tf, nil +} diff --git a/vendor/golang.org/x/tools/internal/analysis/analyzerutil/version.go b/vendor/golang.org/x/tools/internal/analysis/analyzerutil/version.go new file mode 100644 index 0000000000..0b9bcc37b6 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/analysis/analyzerutil/version.go @@ -0,0 +1,42 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package analyzerutil + +import ( + "go/ast" + "strings" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/internal/packagepath" + "golang.org/x/tools/internal/stdlib" + "golang.org/x/tools/internal/versions" +) + +// FileUsesGoVersion reports whether the specified file may use features of the +// specified version of Go (e.g. "go1.24"). +// +// Tip: we recommend using this check "late", just before calling +// pass.Report, rather than "early" (when entering each ast.File, or +// each candidate node of interest, during the traversal), because the +// operation is not free, yet is not a highly selective filter: the +// fraction of files that pass most version checks is high and +// increases over time. +func FileUsesGoVersion(pass *analysis.Pass, file *ast.File, version string) (_res bool) { + fileVersion := pass.TypesInfo.FileVersions[file] + + // Standard packages that are part of toolchain bootstrapping + // are not considered to use a version of Go later than the + // current bootstrap toolchain version. + // The bootstrap rule does not cover tests, + // and some tests (e.g. debug/elf/file_test.go) rely on this. 
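A sketch of how a pass would apply the "check late" advice from the doc comment above; the report helper and its message are hypothetical, and analyzerutil, being internal, is importable only from within x/tools itself. (The body of FileUsesGoVersion continues below.)

package example

import (
	"go/ast"

	"golang.org/x/tools/go/analysis"
	"golang.org/x/tools/internal/analysis/analyzerutil"
)

// report emits a diagnostic only when the file may use go1.22
// features, performing the version check just before Report rather
// than early in the traversal, as the doc comment recommends.
func report(pass *analysis.Pass, file *ast.File, rng analysis.Range) {
	if analyzerutil.FileUsesGoVersion(pass, file, "go1.22") {
		pass.ReportRangef(rng, "construct requires go1.22 or later")
	}
}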
+ pkgpath := pass.Pkg.Path() + if packagepath.IsStdPackage(pkgpath) && + stdlib.IsBootstrapPackage(pkgpath) && // (excludes "*_test" external test packages) + !strings.HasSuffix(pass.Fset.File(file.Pos()).Name(), "_test.go") { // (excludes all tests) + fileVersion = stdlib.BootstrapVersion.String() // package must bootstrap + } + + return !versions.Before(fileVersion, version) +} diff --git a/vendor/golang.org/x/tools/internal/analysisinternal/typeindex/typeindex.go b/vendor/golang.org/x/tools/internal/analysis/typeindex/typeindex.go similarity index 88% rename from vendor/golang.org/x/tools/internal/analysisinternal/typeindex/typeindex.go rename to vendor/golang.org/x/tools/internal/analysis/typeindex/typeindex.go index bba21c6ea0..41146d9abb 100644 --- a/vendor/golang.org/x/tools/internal/analysisinternal/typeindex/typeindex.go +++ b/vendor/golang.org/x/tools/internal/analysis/typeindex/typeindex.go @@ -22,12 +22,12 @@ import ( var Analyzer = &analysis.Analyzer{ Name: "typeindex", Doc: "indexes of type information for later passes", - URL: "https://pkg.go.dev/golang.org/x/tools/internal/analysisinternal/typeindex", + URL: "https://pkg.go.dev/golang.org/x/tools/internal/analysis/typeindex", Run: func(pass *analysis.Pass) (any, error) { inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) return typeindex.New(inspect, pass.Pkg, pass.TypesInfo), nil }, RunDespiteErrors: true, Requires: []*analysis.Analyzer{inspect.Analyzer}, - ResultType: reflect.TypeOf(new(typeindex.Index)), + ResultType: reflect.TypeFor[*typeindex.Index](), } diff --git a/vendor/golang.org/x/tools/internal/analysisinternal/analysis.go b/vendor/golang.org/x/tools/internal/analysisinternal/analysis.go deleted file mode 100644 index 2b4a8ebb6e..0000000000 --- a/vendor/golang.org/x/tools/internal/analysisinternal/analysis.go +++ /dev/null @@ -1,295 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package analysisinternal provides gopls' internal analyses with a -// number of helper functions that operate on typed syntax trees. -package analysisinternal - -import ( - "cmp" - "fmt" - "go/ast" - "go/token" - "go/types" - "slices" - "strings" - - "golang.org/x/tools/go/analysis" -) - -// MatchingIdents finds the names of all identifiers in 'node' that match any of the given types. -// 'pos' represents the position at which the identifiers may be inserted. 'pos' must be within -// the scope of each of identifier we select. Otherwise, we will insert a variable at 'pos' that -// is unrecognized. -// -// TODO(adonovan): this is only used by gopls/internal/analysis/fill{returns,struct}. Move closer. -func MatchingIdents(typs []types.Type, node ast.Node, pos token.Pos, info *types.Info, pkg *types.Package) map[types.Type][]string { - - // Initialize matches to contain the variable types we are searching for. - matches := make(map[types.Type][]string) - for _, typ := range typs { - if typ == nil { - continue // TODO(adonovan): is this reachable? - } - matches[typ] = nil // create entry - } - - seen := map[types.Object]struct{}{} - ast.Inspect(node, func(n ast.Node) bool { - if n == nil { - return false - } - // Prevent circular definitions. If 'pos' is within an assignment statement, do not - // allow any identifiers in that assignment statement to be selected. 
Otherwise, - // we could do the following, where 'x' satisfies the type of 'f0': - // - // x := fakeStruct{f0: x} - // - if assign, ok := n.(*ast.AssignStmt); ok && pos > assign.Pos() && pos <= assign.End() { - return false - } - if n.End() > pos { - return n.Pos() <= pos - } - ident, ok := n.(*ast.Ident) - if !ok || ident.Name == "_" { - return true - } - obj := info.Defs[ident] - if obj == nil || obj.Type() == nil { - return true - } - if _, ok := obj.(*types.TypeName); ok { - return true - } - // Prevent duplicates in matches' values. - if _, ok = seen[obj]; ok { - return true - } - seen[obj] = struct{}{} - // Find the scope for the given position. Then, check whether the object - // exists within the scope. - innerScope := pkg.Scope().Innermost(pos) - if innerScope == nil { - return true - } - _, foundObj := innerScope.LookupParent(ident.Name, pos) - if foundObj != obj { - return true - } - // The object must match one of the types that we are searching for. - // TODO(adonovan): opt: use typeutil.Map? - if names, ok := matches[obj.Type()]; ok { - matches[obj.Type()] = append(names, ident.Name) - } else { - // If the object type does not exactly match - // any of the target types, greedily find the first - // target type that the object type can satisfy. - for typ := range matches { - if equivalentTypes(obj.Type(), typ) { - matches[typ] = append(matches[typ], ident.Name) - } - } - } - return true - }) - return matches -} - -func equivalentTypes(want, got types.Type) bool { - if types.Identical(want, got) { - return true - } - // Code segment to help check for untyped equality from (golang/go#32146). - if rhs, ok := want.(*types.Basic); ok && rhs.Info()&types.IsUntyped > 0 { - if lhs, ok := got.Underlying().(*types.Basic); ok { - return rhs.Info()&types.IsConstType == lhs.Info()&types.IsConstType - } - } - return types.AssignableTo(want, got) -} - -// A ReadFileFunc is a function that returns the -// contents of a file, such as [os.ReadFile]. -type ReadFileFunc = func(filename string) ([]byte, error) - -// CheckedReadFile returns a wrapper around a Pass.ReadFile -// function that performs the appropriate checks. -func CheckedReadFile(pass *analysis.Pass, readFile ReadFileFunc) ReadFileFunc { - return func(filename string) ([]byte, error) { - if err := CheckReadable(pass, filename); err != nil { - return nil, err - } - return readFile(filename) - } -} - -// CheckReadable enforces the access policy defined by the ReadFile field of [analysis.Pass]. -func CheckReadable(pass *analysis.Pass, filename string) error { - if slices.Contains(pass.OtherFiles, filename) || - slices.Contains(pass.IgnoredFiles, filename) { - return nil - } - for _, f := range pass.Files { - if pass.Fset.File(f.FileStart).Name() == filename { - return nil - } - } - return fmt.Errorf("Pass.ReadFile: %s is not among OtherFiles, IgnoredFiles, or names of Files", filename) -} - -// ValidateFixes validates the set of fixes for a single diagnostic. -// Any error indicates a bug in the originating analyzer. -// -// It updates fixes so that fixes[*].End.IsValid(). -// -// It may be used as part of an analysis driver implementation. 
-func ValidateFixes(fset *token.FileSet, a *analysis.Analyzer, fixes []analysis.SuggestedFix) error { - fixMessages := make(map[string]bool) - for i := range fixes { - fix := &fixes[i] - if fixMessages[fix.Message] { - return fmt.Errorf("analyzer %q suggests two fixes with same Message (%s)", a.Name, fix.Message) - } - fixMessages[fix.Message] = true - if err := validateFix(fset, fix); err != nil { - return fmt.Errorf("analyzer %q suggests invalid fix (%s): %v", a.Name, fix.Message, err) - } - } - return nil -} - -// validateFix validates a single fix. -// Any error indicates a bug in the originating analyzer. -// -// It updates fix so that fix.End.IsValid(). -func validateFix(fset *token.FileSet, fix *analysis.SuggestedFix) error { - - // Stably sort edits by Pos. This ordering puts insertions - // (end = start) before deletions (end > start) at the same - // point, but uses a stable sort to preserve the order of - // multiple insertions at the same point. - slices.SortStableFunc(fix.TextEdits, func(x, y analysis.TextEdit) int { - if sign := cmp.Compare(x.Pos, y.Pos); sign != 0 { - return sign - } - return cmp.Compare(x.End, y.End) - }) - - var prev *analysis.TextEdit - for i := range fix.TextEdits { - edit := &fix.TextEdits[i] - - // Validate edit individually. - start := edit.Pos - file := fset.File(start) - if file == nil { - return fmt.Errorf("no token.File for TextEdit.Pos (%v)", edit.Pos) - } - fileEnd := token.Pos(file.Base() + file.Size()) - if end := edit.End; end.IsValid() { - if end < start { - return fmt.Errorf("TextEdit.Pos (%v) > TextEdit.End (%v)", edit.Pos, edit.End) - } - endFile := fset.File(end) - if endFile != file && end < fileEnd+10 { - // Relax the checks below in the special case when the end position - // is only slightly beyond EOF, as happens when End is computed - // (as in ast.{Struct,Interface}Type) rather than based on - // actual token positions. In such cases, truncate end to EOF. - // - // This is a workaround for #71659; see: - // https://github.com/golang/go/issues/71659#issuecomment-2651606031 - // A better fix would be more faithful recording of token - // positions (or their absence) in the AST. - edit.End = fileEnd - continue - } - if endFile == nil { - return fmt.Errorf("no token.File for TextEdit.End (%v; File(start).FileEnd is %d)", end, file.Base()+file.Size()) - } - if endFile != file { - return fmt.Errorf("edit #%d spans files (%v and %v)", - i, file.Position(edit.Pos), endFile.Position(edit.End)) - } - } else { - edit.End = start // update the SuggestedFix - } - if eof := fileEnd; edit.End > eof { - return fmt.Errorf("end is (%v) beyond end of file (%v)", edit.End, eof) - } - - // Validate the sequence of edits: - // properly ordered, no overlapping deletions - if prev != nil && edit.Pos < prev.End { - xpos := fset.Position(prev.Pos) - xend := fset.Position(prev.End) - ypos := fset.Position(edit.Pos) - yend := fset.Position(edit.End) - return fmt.Errorf("overlapping edits to %s (%d:%d-%d:%d and %d:%d-%d:%d)", - xpos.Filename, - xpos.Line, xpos.Column, - xend.Line, xend.Column, - ypos.Line, ypos.Column, - yend.Line, yend.Column, - ) - } - prev = edit - } - - return nil -} - -// Range returns an [analysis.Range] for the specified start and end positions. -func Range(pos, end token.Pos) analysis.Range { - return tokenRange{pos, end} -} - -// tokenRange is an implementation of the [analysis.Range] interface. 
-type tokenRange struct{ StartPos, EndPos token.Pos } - -func (r tokenRange) Pos() token.Pos { return r.StartPos } -func (r tokenRange) End() token.Pos { return r.EndPos } - -// TODO(adonovan): the import-related functions below don't depend on -// analysis (or even on go/types or go/ast). Move somewhere more logical. - -// CanImport reports whether one package is allowed to import another. -// -// TODO(adonovan): allow customization of the accessibility relation -// (e.g. for Bazel). -func CanImport(from, to string) bool { - // TODO(adonovan): better segment hygiene. - if to == "internal" || strings.HasPrefix(to, "internal/") { - // Special case: only std packages may import internal/... - // We can't reliably know whether we're in std, so we - // use a heuristic on the first segment. - first, _, _ := strings.Cut(from, "/") - if strings.Contains(first, ".") { - return false // example.com/foo ∉ std - } - if first == "testdata" { - return false // testdata/foo ∉ std - } - } - if strings.HasSuffix(to, "/internal") { - return strings.HasPrefix(from, to[:len(to)-len("/internal")]) - } - if i := strings.LastIndex(to, "/internal/"); i >= 0 { - return strings.HasPrefix(from, to[:i]) - } - return true -} - -// IsStdPackage reports whether the specified package path belongs to a -// package in the standard library (including internal dependencies). -func IsStdPackage(path string) bool { - // A standard package has no dot in its first segment. - // (It may yet have a dot, e.g. "vendor/golang.org/x/foo".) - slash := strings.IndexByte(path, '/') - if slash < 0 { - slash = len(path) - } - return !strings.Contains(path[:slash], ".") && path != "testdata" -} diff --git a/vendor/golang.org/x/tools/internal/analysisinternal/generated/generated.go b/vendor/golang.org/x/tools/internal/analysisinternal/generated/generated.go deleted file mode 100644 index 13e1b69021..0000000000 --- a/vendor/golang.org/x/tools/internal/analysisinternal/generated/generated.go +++ /dev/null @@ -1,41 +0,0 @@ -// Copyright 2025 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package generated defines an analyzer whose result makes it -// convenient to skip diagnostics within generated files. -package generated - -import ( - "go/ast" - "go/token" - "reflect" - - "golang.org/x/tools/go/analysis" -) - -var Analyzer = &analysis.Analyzer{ - Name: "generated", - Doc: "detect which Go files are generated", - URL: "https://pkg.go.dev/golang.org/x/tools/internal/analysisinternal/generated", - ResultType: reflect.TypeFor[*Result](), - Run: func(pass *analysis.Pass) (any, error) { - set := make(map[*token.File]bool) - for _, file := range pass.Files { - if ast.IsGenerated(file) { - set[pass.Fset.File(file.FileStart)] = true - } - } - return &Result{fset: pass.Fset, generatedFiles: set}, nil - }, -} - -type Result struct { - fset *token.FileSet - generatedFiles map[*token.File]bool -} - -// IsGenerated reports whether the position is within a generated file. 
-func (r *Result) IsGenerated(pos token.Pos) bool { - return r.generatedFiles[r.fset.File(pos)] -} diff --git a/vendor/golang.org/x/tools/internal/astutil/stringlit.go b/vendor/golang.org/x/tools/internal/astutil/stringlit.go index 849d45d853..ce1e7de882 100644 --- a/vendor/golang.org/x/tools/internal/astutil/stringlit.go +++ b/vendor/golang.org/x/tools/internal/astutil/stringlit.go @@ -14,16 +14,16 @@ import ( // RangeInStringLiteral calculates the positional range within a string literal // corresponding to the specified start and end byte offsets within the logical string. -func RangeInStringLiteral(lit *ast.BasicLit, start, end int) (token.Pos, token.Pos, error) { +func RangeInStringLiteral(lit *ast.BasicLit, start, end int) (Range, error) { startPos, err := PosInStringLiteral(lit, start) if err != nil { - return 0, 0, fmt.Errorf("start: %v", err) + return Range{}, fmt.Errorf("start: %v", err) } endPos, err := PosInStringLiteral(lit, end) if err != nil { - return 0, 0, fmt.Errorf("end: %v", err) + return Range{}, fmt.Errorf("end: %v", err) } - return startPos, endPos, nil + return Range{startPos, endPos}, nil } // PosInStringLiteral returns the position within a string literal diff --git a/vendor/golang.org/x/tools/internal/astutil/util.go b/vendor/golang.org/x/tools/internal/astutil/util.go index a1c0983504..7a02fca21e 100644 --- a/vendor/golang.org/x/tools/internal/astutil/util.go +++ b/vendor/golang.org/x/tools/internal/astutil/util.go @@ -5,6 +5,7 @@ package astutil import ( + "fmt" "go/ast" "go/printer" "go/token" @@ -50,28 +51,26 @@ func PreorderStack(root ast.Node, stack []ast.Node, f func(n ast.Node, stack []a } // NodeContains reports whether the Pos/End range of node n encloses -// the given position pos. +// the given range. // // It is inclusive of both end points, to allow hovering (etc) when // the cursor is immediately after a node. // -// For unfortunate historical reasons, the Pos/End extent of an -// ast.File runs from the start of its package declaration---excluding -// copyright comments, build tags, and package documentation---to the -// end of its last declaration, excluding any trailing comments. So, -// as a special case, if n is an [ast.File], NodeContains uses -// n.FileStart <= pos && pos <= n.FileEnd to report whether the -// position lies anywhere within the file. +// Like [NodeRange], it treats the range of an [ast.File] as the +// file's complete extent. // // Precondition: n must not be nil. -func NodeContains(n ast.Node, pos token.Pos) bool { - var start, end token.Pos - if file, ok := n.(*ast.File); ok { - start, end = file.FileStart, file.FileEnd // entire file - } else { - start, end = n.Pos(), n.End() - } - return start <= pos && pos <= end +func NodeContains(n ast.Node, rng Range) bool { + return NodeRange(n).Contains(rng) +} + +// NodeContainsPos reports whether the Pos/End range of node n encloses +// the given pos. +// +// Like [NodeRange], it treats the range of an [ast.File] as the +// file's complete extent. +func NodeContainsPos(n ast.Node, pos token.Pos) bool { + return NodeRange(n).ContainsPos(pos) } // IsChildOf reports whether cur.ParentEdge is ek. @@ -117,3 +116,118 @@ func Format(fset *token.FileSet, n ast.Node) string { printer.Fprint(&buf, fset, n) // ignore errors return buf.String() } + +// -- Range -- + +// Range is a Pos interval. +// It implements [analysis.Range] and [ast.Node]. +type Range struct{ Start, EndPos token.Pos } + +// RangeOf constructs a Range.
+//
+// RangeOf exists to pacify the "unkeyed literal" (composites) vet
+// check. It would be nice if there were a way for a type to add
+// itself to the allowlist.
+func RangeOf(start, end token.Pos) Range { return Range{start, end} }
+
+// NodeRange returns the extent of node n as a Range.
+//
+// For unfortunate historical reasons, the Pos/End extent of an
+// ast.File runs from the start of its package declaration---excluding
+// copyright comments, build tags, and package documentation---to the
+// end of its last declaration, excluding any trailing comments. So,
+// as a special case, if n is an [ast.File], NodeRange returns the
+// file's complete extent, from n.FileStart to n.FileEnd.
+func NodeRange(n ast.Node) Range {
+	if file, ok := n.(*ast.File); ok {
+		return Range{file.FileStart, file.FileEnd} // entire file
+	}
+	return Range{n.Pos(), n.End()}
+}
+
+func (r Range) Pos() token.Pos { return r.Start }
+func (r Range) End() token.Pos { return r.EndPos }
+
+// ContainsPos reports whether the range (inclusive of both end points)
+// includes the specified position.
+func (r Range) ContainsPos(pos token.Pos) bool {
+	return r.Contains(RangeOf(pos, pos))
+}
+
+// Contains reports whether the range (inclusive of both end points)
+// includes the specified range.
+func (r Range) Contains(rng Range) bool {
+	return r.Start <= rng.Start && rng.EndPos <= r.EndPos
+}
+
+// IsValid reports whether the range is valid.
+func (r Range) IsValid() bool { return r.Start.IsValid() && r.Start <= r.EndPos }
+
+// --
+
+// Select returns the syntax nodes identified by a user's text
+// selection. It returns three nodes: the innermost node that wholly
+// encloses the selection; and the first and last nodes that are
+// wholly enclosed by the selection.
+//
+// For example, given this selection:
+//
+//	{ f(); g(); /* comment */ }
+//	  ~~~~~~~~~~~
+//
+// Select returns the enclosing BlockStmt, the f() CallExpr, and the g() CallExpr.
+//
+// Callers that require exactly one syntax tree (e.g. just f() or just
+// g()) should check that the returned start and end nodes are
+// identical.
+//
+// This function is intended to be called early in the handling of a
+// user's request, since it is tolerant of sloppy selection including
+// extraneous whitespace and comments. Use it in new code instead of
+// PathEnclosingInterval. When the exact extent of a node is known,
+// use [Cursor.FindByPos] instead.
+func Select(curFile inspector.Cursor, start, end token.Pos) (_enclosing, _start, _end inspector.Cursor, _ error) {
+	curEnclosing, ok := curFile.FindByPos(start, end)
+	if !ok {
+		return noCursor, noCursor, noCursor, fmt.Errorf("invalid selection")
+	}
+
+	// Find the first and last node wholly within the (start, end) range.
+	// We'll narrow the effective selection to them, to exclude whitespace.
+	// (This matches the functionality of PathEnclosingInterval.)
+	var curStart, curEnd inspector.Cursor
+	rng := RangeOf(start, end)
+	for cur := range curEnclosing.Preorder() {
+		if rng.Contains(NodeRange(cur.Node())) {
+			// The start node has the least Pos.
+			if !CursorValid(curStart) {
+				curStart = cur
+			}
+			// The end node has the greatest End.
+			// End positions do not change monotonically,
+			// so we must compute the max.
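+			// (For example, a preorder walk visits a[i]
+			// before its child a, yet a[i].End() > a.End().)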
+			if !CursorValid(curEnd) ||
+				cur.Node().End() > curEnd.Node().End() {
+				curEnd = cur
+			}
+		}
+	}
+	if !CursorValid(curStart) {
+		return noCursor, noCursor, noCursor, fmt.Errorf("no syntax selected")
+	}
+	return curEnclosing, curStart, curEnd, nil
+}
+
+// CursorValid reports whether the cursor is valid.
+//
+// A valid cursor may yet be the virtual root node,
+// cur.Inspector().Root(), which has no [Cursor.Node].
+//
+// TODO(adonovan): move to cursorutil package, and move that package into x/tools.
+// Ultimately, make this a method of Cursor. Needs a proposal.
+func CursorValid(cur inspector.Cursor) bool {
+	return cur.Inspector() != nil
+}
+
+var noCursor inspector.Cursor
diff --git a/vendor/golang.org/x/tools/internal/cfginternal/cfginternal.go b/vendor/golang.org/x/tools/internal/cfginternal/cfginternal.go
new file mode 100644
index 0000000000..a9b6236f4d
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/cfginternal/cfginternal.go
@@ -0,0 +1,16 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package cfginternal exposes internals of go/cfg.
+// It cannot actually depend on symbols from go/cfg.
+package cfginternal
+
+// IsNoReturn exposes (*cfg.CFG).noReturn to the ctrlflow analyzer.
+// TODO(adonovan): add CFG.NoReturn to the public API.
+//
+// You must link [golang.org/x/tools/go/cfg] into your application for
+// this function to work; the default implementation panics.
+var IsNoReturn = func(cfg any) bool {
+	panic("golang.org/x/tools/go/cfg not linked into application")
+}
diff --git a/vendor/golang.org/x/tools/internal/gcimporter/bimport.go b/vendor/golang.org/x/tools/internal/gcimporter/bimport.go
index 734c46198d..555ef626c0 100644
--- a/vendor/golang.org/x/tools/internal/gcimporter/bimport.go
+++ b/vendor/golang.org/x/tools/internal/gcimporter/bimport.go
@@ -34,7 +34,7 @@ type fileInfo struct {
 const maxlines = 64 * 1024
 
 func (s *fakeFileSet) pos(file string, line, column int) token.Pos {
-	// TODO(mdempsky): Make use of column.
+	_ = column // TODO(mdempsky): Make use of column.
 
 	// Since we don't know the set of needed file positions, we reserve maxlines
 	// positions per file. We delay calling token.File.SetLines until all
diff --git a/vendor/golang.org/x/tools/internal/gcimporter/iexport.go b/vendor/golang.org/x/tools/internal/gcimporter/iexport.go
index 4a4357d2bd..2bef2b058b 100644
--- a/vendor/golang.org/x/tools/internal/gcimporter/iexport.go
+++ b/vendor/golang.org/x/tools/internal/gcimporter/iexport.go
@@ -829,8 +829,7 @@ func (p *iexporter) doDecl(obj types.Object) {
 			// their name must be qualified before exporting recv.
 			if rparams := sig.RecvTypeParams(); rparams.Len() > 0 {
 				prefix := obj.Name() + "." + m.Name()
-				for i := 0; i < rparams.Len(); i++ {
-					rparam := rparams.At(i)
+				for rparam := range rparams.TypeParams() {
 					name := tparamExportName(prefix, rparam)
 					w.p.tparamNames[rparam.Obj()] = name
 				}
@@ -944,6 +943,13 @@ func (w *exportWriter) posV0(pos token.Pos) {
 }
 
 func (w *exportWriter) pkg(pkg *types.Package) {
+	if pkg == nil {
+		// [exportWriter.typ] accepts a nil pkg only for types
+		// of constants, which cannot contain named objects
+		// such as fields or methods and thus should never
+		// reach this method (#76222).
+		panic("nil package")
+	}
 	// Ensure any referenced packages are declared in the main index.
w.p.allPkgs[pkg] = true @@ -959,9 +965,11 @@ func (w *exportWriter) qualifiedType(obj *types.TypeName) { w.pkg(obj.Pkg()) } -// TODO(rfindley): what does 'pkg' even mean here? It would be better to pass -// it in explicitly into signatures and structs that may use it for -// constructing fields. +// typ emits the specified type. +// +// Objects within the type (struct fields and interface methods) are +// qualified by pkg. It may be nil if the type cannot contain objects, +// such as the type of a constant. func (w *exportWriter) typ(t types.Type, pkg *types.Package) { w.data.uint64(w.p.typOff(t, pkg)) } @@ -991,6 +999,7 @@ func (w *exportWriter) startType(k itag) { w.data.uint64(uint64(k)) } +// doTyp is the implementation of [exportWriter.typ]. func (w *exportWriter) doTyp(t types.Type, pkg *types.Package) { if trace { w.p.trace("exporting type %s (%T)", t, t) @@ -1064,7 +1073,7 @@ func (w *exportWriter) doTyp(t types.Type, pkg *types.Package) { case *types.Signature: w.startType(signatureType) - w.pkg(pkg) + w.pkg(pkg) // qualifies param/result vars w.signature(t) case *types.Struct: @@ -1110,19 +1119,19 @@ func (w *exportWriter) doTyp(t types.Type, pkg *types.Package) { case *types.Interface: w.startType(interfaceType) - w.pkg(pkg) + w.pkg(pkg) // qualifies unexported method funcs n := t.NumEmbeddeds() w.uint64(uint64(n)) for i := 0; i < n; i++ { ft := t.EmbeddedType(i) - tPkg := pkg if named, _ := types.Unalias(ft).(*types.Named); named != nil { w.pos(named.Obj().Pos()) } else { + // e.g. ~int w.pos(token.NoPos) } - w.typ(ft, tPkg) + w.typ(ft, pkg) } // See comment for struct fields. In shallow mode we change the encoding @@ -1223,20 +1232,19 @@ func (w *exportWriter) signature(sig *types.Signature) { func (w *exportWriter) typeList(ts *types.TypeList, pkg *types.Package) { w.uint64(uint64(ts.Len())) - for i := 0; i < ts.Len(); i++ { - w.typ(ts.At(i), pkg) + for t := range ts.Types() { + w.typ(t, pkg) } } func (w *exportWriter) tparamList(prefix string, list *types.TypeParamList, pkg *types.Package) { ll := uint64(list.Len()) w.uint64(ll) - for i := 0; i < list.Len(); i++ { - tparam := list.At(i) + for tparam := range list.TypeParams() { // Set the type parameter exportName before exporting its type. exportName := tparamExportName(prefix, tparam) w.p.tparamNames[tparam.Obj()] = exportName - w.typ(list.At(i), pkg) + w.typ(tparam, pkg) } } diff --git a/vendor/golang.org/x/tools/internal/gcimporter/iimport.go b/vendor/golang.org/x/tools/internal/gcimporter/iimport.go index 82e6c9d2dc..4d6d50094a 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/iimport.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/iimport.go @@ -432,10 +432,10 @@ func (p *iimporter) doDecl(pkg *types.Package, name string) { errorf("%v.%v not in index", pkg, name) } - r := &importReader{p: p, currPkg: pkg} + r := &importReader{p: p} r.declReader.Reset(p.declData[off:]) - r.obj(name) + r.obj(pkg, name) } func (p *iimporter) stringAt(off uint64) string { @@ -551,7 +551,6 @@ func canReuse(def *types.Named, rhs types.Type) bool { type importReader struct { p *iimporter declReader bytes.Reader - currPkg *types.Package prevFile string prevLine int64 prevColumn int64 @@ -565,7 +564,8 @@ type importReader struct { // for 1.24, but the fix was not worth back-porting). var markBlack = func(name *types.TypeName) {} -func (r *importReader) obj(name string) { +// obj decodes and declares the package-level object denoted by (pkg, name). 
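+// (pkg is now threaded explicitly through the decoding methods; it
+// replaces the importReader's deleted currPkg field, so the qualifier
+// no longer lives in mutable reader state.)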
+func (r *importReader) obj(pkg *types.Package, name string) { tag := r.byte() pos := r.pos() @@ -576,27 +576,27 @@ func (r *importReader) obj(name string) { tparams = r.tparamList() } typ := r.typ() - obj := aliases.NewAlias(r.p.aliases, pos, r.currPkg, name, typ, tparams) + obj := aliases.NewAlias(r.p.aliases, pos, pkg, name, typ, tparams) markBlack(obj) // workaround for golang/go#69912 r.declare(obj) case constTag: typ, val := r.value() - r.declare(types.NewConst(pos, r.currPkg, name, typ, val)) + r.declare(types.NewConst(pos, pkg, name, typ, val)) case funcTag, genericFuncTag: var tparams []*types.TypeParam if tag == genericFuncTag { tparams = r.tparamList() } - sig := r.signature(nil, nil, tparams) - r.declare(types.NewFunc(pos, r.currPkg, name, sig)) + sig := r.signature(pkg, nil, nil, tparams) + r.declare(types.NewFunc(pos, pkg, name, sig)) case typeTag, genericTypeTag: // Types can be recursive. We need to setup a stub // declaration before recursing. - obj := types.NewTypeName(pos, r.currPkg, name, nil) + obj := types.NewTypeName(pos, pkg, name, nil) named := types.NewNamed(obj, nil, nil) markBlack(obj) // workaround for golang/go#69912 @@ -616,7 +616,7 @@ func (r *importReader) obj(name string) { for n := r.uint64(); n > 0; n-- { mpos := r.pos() mname := r.ident() - recv := r.param() + recv := r.param(pkg) // If the receiver has any targs, set those as the // rparams of the method (since those are the @@ -630,9 +630,9 @@ func (r *importReader) obj(name string) { rparams[i] = types.Unalias(targs.At(i)).(*types.TypeParam) } } - msig := r.signature(recv, rparams, nil) + msig := r.signature(pkg, recv, rparams, nil) - named.AddMethod(types.NewFunc(mpos, r.currPkg, mname, msig)) + named.AddMethod(types.NewFunc(mpos, pkg, mname, msig)) } } @@ -644,12 +644,12 @@ func (r *importReader) obj(name string) { errorf("unexpected type param type") } name0 := tparamName(name) - tn := types.NewTypeName(pos, r.currPkg, name0, nil) + tn := types.NewTypeName(pos, pkg, name0, nil) t := types.NewTypeParam(tn, nil) // To handle recursive references to the typeparam within its // bound, save the partial type in tparamIndex before reading the bounds. - id := ident{r.currPkg, name} + id := ident{pkg, name} r.p.tparamIndex[id] = t var implicit bool if r.p.version >= iexportVersionGo1_18 { @@ -672,7 +672,7 @@ func (r *importReader) obj(name string) { case varTag: typ := r.typ() - v := types.NewVar(pos, r.currPkg, name, typ) + v := types.NewVar(pos, pkg, name, typ) typesinternal.SetVarKind(v, typesinternal.PackageVar) r.declare(v) @@ -905,11 +905,11 @@ func (r *importReader) doType(base *types.Named) (res types.Type) { case mapType: return types.NewMap(r.typ(), r.typ()) case signatureType: - r.currPkg = r.pkg() - return r.signature(nil, nil, nil) + paramPkg := r.pkg() + return r.signature(paramPkg, nil, nil, nil) case structType: - r.currPkg = r.pkg() + fieldPkg := r.pkg() fields := make([]*types.Var, r.uint64()) tags := make([]string, len(fields)) @@ -932,7 +932,7 @@ func (r *importReader) doType(base *types.Named) (res types.Type) { // discussed in iexport.go, this is not correct, but mostly works and is // preferable to failing (for now at least). 
if field == nil { - field = types.NewField(fpos, r.currPkg, fname, ftyp, emb) + field = types.NewField(fpos, fieldPkg, fname, ftyp, emb) } fields[i] = field @@ -941,7 +941,7 @@ func (r *importReader) doType(base *types.Named) (res types.Type) { return types.NewStruct(fields, tags) case interfaceType: - r.currPkg = r.pkg() + methodPkg := r.pkg() // qualifies methods and their param/result vars embeddeds := make([]types.Type, r.uint64()) for i := range embeddeds { @@ -963,12 +963,12 @@ func (r *importReader) doType(base *types.Named) (res types.Type) { // don't agree with this. var recv *types.Var if base != nil { - recv = types.NewVar(token.NoPos, r.currPkg, "", base) + recv = types.NewVar(token.NoPos, methodPkg, "", base) } - msig := r.signature(recv, nil, nil) + msig := r.signature(methodPkg, recv, nil, nil) if method == nil { - method = types.NewFunc(mpos, r.currPkg, mname, msig) + method = types.NewFunc(mpos, methodPkg, mname, msig) } methods[i] = method } @@ -1049,9 +1049,9 @@ func (r *importReader) objectPathObject() types.Object { return obj } -func (r *importReader) signature(recv *types.Var, rparams []*types.TypeParam, tparams []*types.TypeParam) *types.Signature { - params := r.paramList() - results := r.paramList() +func (r *importReader) signature(paramPkg *types.Package, recv *types.Var, rparams []*types.TypeParam, tparams []*types.TypeParam) *types.Signature { + params := r.paramList(paramPkg) + results := r.paramList(paramPkg) variadic := params.Len() > 0 && r.bool() return types.NewSignatureType(recv, rparams, tparams, params, results, variadic) } @@ -1070,19 +1070,19 @@ func (r *importReader) tparamList() []*types.TypeParam { return xs } -func (r *importReader) paramList() *types.Tuple { +func (r *importReader) paramList(pkg *types.Package) *types.Tuple { xs := make([]*types.Var, r.uint64()) for i := range xs { - xs[i] = r.param() + xs[i] = r.param(pkg) } return types.NewTuple(xs...) } -func (r *importReader) param() *types.Var { +func (r *importReader) param(pkg *types.Package) *types.Var { pos := r.pos() name := r.ident() typ := r.typ() - return types.NewParam(pos, r.currPkg, name, typ) + return types.NewParam(pos, pkg, name, typ) } func (r *importReader) bool() bool { diff --git a/vendor/golang.org/x/tools/internal/goplsexport/export.go b/vendor/golang.org/x/tools/internal/goplsexport/export.go index d7c2b9f3c8..bca4d8a0b0 100644 --- a/vendor/golang.org/x/tools/internal/goplsexport/export.go +++ b/vendor/golang.org/x/tools/internal/goplsexport/export.go @@ -10,5 +10,7 @@ import "golang.org/x/tools/go/analysis" var ( ErrorsAsTypeModernizer *analysis.Analyzer // = modernize.errorsastypeAnalyzer - StdIteratorsModernizer *analysis.Analyzer // = modernize.stditeratorsAnalyer + StdIteratorsModernizer *analysis.Analyzer // = modernize.stditeratorsAnalyzer + PlusBuildModernizer *analysis.Analyzer // = modernize.plusbuildAnalyzer + StringsCutModernizer *analysis.Analyzer // = modernize.stringscutAnalyzer ) diff --git a/vendor/golang.org/x/tools/internal/packagepath/packagepath.go b/vendor/golang.org/x/tools/internal/packagepath/packagepath.go new file mode 100644 index 0000000000..fa39a13f9e --- /dev/null +++ b/vendor/golang.org/x/tools/internal/packagepath/packagepath.go @@ -0,0 +1,49 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package packagepath provides metadata operations on package path +// strings. 
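+//
+// For example, CanImport("example.com/a", "example.com/a/internal/b")
+// reports true, while CanImport("example.com/z", "example.com/a/internal/b")
+// reports false; IsStdPackage("fmt") is true and
+// IsStdPackage("example.com/foo") is false.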
+package packagepath + +// (This package should not depend on go/ast.) +import "strings" + +// CanImport reports whether one package is allowed to import another. +// +// TODO(adonovan): allow customization of the accessibility relation +// (e.g. for Bazel). +func CanImport(from, to string) bool { + // TODO(adonovan): better segment hygiene. + if to == "internal" || strings.HasPrefix(to, "internal/") { + // Special case: only std packages may import internal/... + // We can't reliably know whether we're in std, so we + // use a heuristic on the first segment. + first, _, _ := strings.Cut(from, "/") + if strings.Contains(first, ".") { + return false // example.com/foo ∉ std + } + if first == "testdata" { + return false // testdata/foo ∉ std + } + } + if strings.HasSuffix(to, "/internal") { + return strings.HasPrefix(from, to[:len(to)-len("/internal")]) + } + if i := strings.LastIndex(to, "/internal/"); i >= 0 { + return strings.HasPrefix(from, to[:i]) + } + return true +} + +// IsStdPackage reports whether the specified package path belongs to a +// package in the standard library (including internal dependencies). +func IsStdPackage(path string) bool { + // A standard package has no dot in its first segment. + // (It may yet have a dot, e.g. "vendor/golang.org/x/foo".) + slash := strings.IndexByte(path, '/') + if slash < 0 { + slash = len(path) + } + return !strings.Contains(path[:slash], ".") && path != "testdata" +} diff --git a/vendor/golang.org/x/tools/internal/refactor/delete.go b/vendor/golang.org/x/tools/internal/refactor/delete.go index aa8ba5af4c..9b96b1dbf1 100644 --- a/vendor/golang.org/x/tools/internal/refactor/delete.go +++ b/vendor/golang.org/x/tools/internal/refactor/delete.go @@ -18,10 +18,11 @@ import ( "golang.org/x/tools/go/ast/inspector" "golang.org/x/tools/internal/astutil" "golang.org/x/tools/internal/typesinternal" + "golang.org/x/tools/internal/typesinternal/typeindex" ) -// DeleteVar returns edits to delete the declaration of a variable -// whose defining identifier is curId. +// DeleteVar returns edits to delete the declaration of a variable or +// constant whose defining identifier is curId. // // It handles variants including: // - GenDecl > ValueSpec versus AssignStmt; @@ -45,6 +46,9 @@ func DeleteVar(tokFile *token.File, info *types.Info, curId inspector.Cursor) [] return nil } +// deleteVarFromValueSpec returns edits to delete the declaration of a +// variable or constant within a ValueSpec. +// // Precondition: curId is Ident beneath ValueSpec.Names beneath GenDecl. // // See also [deleteVarFromAssignStmt], which has parallel structure. @@ -97,7 +101,7 @@ func deleteVarFromValueSpec(tokFile *token.File, info *types.Info, curIdent insp }} } - // If the assignment is 1:1 and the RHS has no effects, + // If the assignment is n:n and the RHS has no effects, // we can delete the LHS and its corresponding RHS. if len(spec.Names) == len(spec.Values) && typesinternal.NoEffects(info, spec.Values[index]) { @@ -239,12 +243,12 @@ func deleteVarFromAssignStmt(tokFile *token.File, info *types.Info, curIdent ins return edits } -// DeleteSpec returns edits to delete the ValueSpec identified by curSpec. +// DeleteSpec returns edits to delete the {Type,Value}Spec identified by curSpec. // // TODO(adonovan): add test suite. Test for consts as well. 
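+//
+// For example, deleting the final spec b from
+//
+//	var (
+//		a = 1
+//		b = 2 // comment
+//	)
+//
+// also removes b's trailing comment (via eolComment below).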
 func DeleteSpec(tokFile *token.File, curSpec inspector.Cursor) []analysis.TextEdit {
 	var (
-		spec    = curSpec.Node().(*ast.ValueSpec)
+		spec    = curSpec.Node().(ast.Spec)
 		curDecl = curSpec.Parent()
 		decl    = curDecl.Node().(*ast.GenDecl)
 	)
@@ -258,14 +262,14 @@ func DeleteSpec(tokFile *token.File, curSpec inspector.Cursor) []analysis.TextEd
 	// Delete the spec and its comments.
 	_, index := curSpec.ParentEdge() // index of ValueSpec within GenDecl.Specs
 	pos, end := spec.Pos(), spec.End()
-	if spec.Doc != nil {
-		pos = spec.Doc.Pos() // leading comment
+	if doc := astutil.DocComment(spec); doc != nil {
+		pos = doc.Pos() // leading comment
 	}
 	if index == len(decl.Specs)-1 {
 		// Delete final spec.
-		if spec.Comment != nil {
+		if c := eolComment(spec); c != nil {
 			// var (v int // comment \n)
-			end = spec.Comment.End()
+			end = c.End()
 		}
 	} else {
 		// Delete non-final spec.
@@ -327,107 +331,237 @@ func DeleteDecl(tokFile *token.File, curDecl inspector.Cursor) []analysis.TextEd
 	}
 }
 
+// filterPos returns the leftmost comment Pos within (start, end) and the
+// rightmost comment End no greater than end; ok reports whether either was found.
+func filterPos(nds []*ast.Comment, start, end token.Pos) (token.Pos, token.Pos, bool) {
+	l, r := end, token.NoPos
+	ok := false
+	for _, n := range nds {
+		if n.Pos() > start && n.Pos() < l {
+			l = n.Pos()
+			ok = true
+		}
+		if n.End() <= end && n.End() > r {
+			r = n.End()
+			ok = true
+		}
+	}
+	return l, r, ok
+}
+
 // DeleteStmt returns the edits to remove the [ast.Stmt] identified by
-// curStmt, if it is contained within a BlockStmt, CaseClause,
-// CommClause, or is the STMT in switch STMT; ... {...}. It returns nil otherwise.
-func DeleteStmt(tokFile *token.File, curStmt inspector.Cursor) []analysis.TextEdit {
-	stmt := curStmt.Node().(ast.Stmt)
-	// if the stmt is on a line by itself delete the whole line
-	// otherwise just delete the statement.
-
-	// this logic would be a lot simpler with the file contents, and somewhat simpler
-	// if the cursors included the comments.
-
-	lineOf := tokFile.Line
-	stmtStartLine, stmtEndLine := lineOf(stmt.Pos()), lineOf(stmt.End())
-
-	var from, to token.Pos
-	// bounds of adjacent syntax/comments on same line, if any
-	limits := func(left, right token.Pos) {
+// curStmt if it recognizes the context. It returns nil otherwise.
+// TODO(pjw, adonovan): it should not return nil, it should return an error.
+//
+// DeleteStmt is called with just the AST, so it has trouble deciding if
+// a comment is associated with the statement to be deleted. For instance,
+//
+//	for /*A*/ init()/*B*/;/*C*/cond()/*D*/;/*E*/post() /*F*/ { /*G*/}
+//
+// comments B and C are indistinguishable, as are D and E. That is, as the
+// AST does not say where the semicolons are, B and C could go either
+// with the init() or the cond(), so they cannot be removed safely. The same
+// is true for D, E, and the post(). (And there are other similar cases.)
+// But the other comments can be removed, as they are unambiguously
+// associated with the statement being deleted. In particular,
+// it removes whole lines like
+//
+//	stmt // comment
+func DeleteStmt(file *token.File, curStmt inspector.Cursor) []analysis.TextEdit {
+	// If the stmt is on a line by itself, or a range of lines, delete the
+	// whole thing, including comments. Except for the heads of switches,
+	// type switches, and for-statements, that's the usual case. Complexity
+	// occurs where there are multiple statements on the same line, and
+	// adjacent comments.
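+	//
+	// For example, deleting g() from
+	//
+	//	f()
+	//	g() // comment
+	//	h()
+	//
+	// removes the g() line in its entirety, trailing comment included.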
+
+	// Where other syntax shares the line, we can remove only some of
+	// the adjacent comments: in me()/*A*/;b(), comment A cannot be
+	// removed, because the ast is indistinguishable from me();/*A*/b(),
+	// and the same goes for cases like switch me()/*A*/; x.(type) {.
+
+	// this would be more precise with the file contents, or if the ast
+	// contained the location of semicolons
+	var (
+		stmt          = curStmt.Node().(ast.Stmt)
+		tokFile       = file
+		lineOf        = tokFile.Line
+		stmtStartLine = lineOf(stmt.Pos())
+		stmtEndLine   = lineOf(stmt.End())
+
+		leftSyntax, rightSyntax     token.Pos      // pieces of parent node on stmt{Start,End}Line
+		leftComments, rightComments []*ast.Comment // comments before/after stmt on the same line
+	)
+
+	// remember the Pos that are on the same line as stmt
+	use := func(left, right token.Pos) {
 		if lineOf(left) == stmtStartLine {
-			from = left
+			leftSyntax = left
 		}
 		if lineOf(right) == stmtEndLine {
-			to = right
+			rightSyntax = right
 		}
 	}
-	// TODO(pjw): there are other places a statement might be removed:
-	// IfStmt = "if" [ SimpleStmt ";" ] Expression Block [ "else" ( IfStmt | Block ) ] .
-	// (removing the blocks requires more rewriting than this routine would do)
-	// CommCase = "case" ( SendStmt | RecvStmt ) | "default" .
-	// (removing the stmt requires more rewriting, and it's unclear what the user means)
-	switch parent := curStmt.Parent().Node().(type) {
-	case *ast.SwitchStmt:
-		limits(parent.Switch, parent.Body.Lbrace)
-	case *ast.TypeSwitchStmt:
-		limits(parent.Switch, parent.Body.Lbrace)
-		if parent.Assign == stmt {
-			return nil // don't let the user break the type switch
+
+	// find the comments, if any, on the same line
+Big:
+	for _, cg := range astutil.EnclosingFile(curStmt).Comments {
+		for _, co := range cg.List {
+			if lineOf(co.End()) < stmtStartLine {
+				continue
+			} else if lineOf(co.Pos()) > stmtEndLine {
+				break Big // no more are possible
+			}
+			if lineOf(co.End()) == stmtStartLine && co.End() <= stmt.Pos() {
+				// comment is before the statement
+				leftComments = append(leftComments, co)
+			} else if lineOf(co.Pos()) == stmtEndLine && co.Pos() >= stmt.End() {
+				// comment is after the statement
+				rightComments = append(rightComments, co)
+			}
 		}
+	}
+
+	// find any other syntax on the same line
+	var (
+		leftStmt, rightStmt token.Pos // end/start positions of sibling statements in a []Stmt list
+		inStmtList          = false
+		curParent           = curStmt.Parent()
+	)
+	switch parent := curParent.Node().(type) {
 	case *ast.BlockStmt:
-		limits(parent.Lbrace, parent.Rbrace)
+		use(parent.Lbrace, parent.Rbrace)
+		inStmtList = true
+	case *ast.CaseClause:
+		use(parent.Colon, curStmt.Parent().Parent().Node().(*ast.BlockStmt).Rbrace)
+		inStmtList = true
 	case *ast.CommClause:
-		limits(parent.Colon, curStmt.Parent().Parent().Node().(*ast.BlockStmt).Rbrace)
 		if parent.Comm == stmt {
 			return nil // maybe the user meant to remove the entire CommClause?
 		}
-	case *ast.CaseClause:
-		limits(parent.Colon, curStmt.Parent().Parent().Node().(*ast.BlockStmt).Rbrace)
+		use(parent.Colon, curStmt.Parent().Parent().Node().(*ast.BlockStmt).Rbrace)
+		inStmtList = true
 	case *ast.ForStmt:
-		limits(parent.For, parent.Body.Lbrace)
-
+		use(parent.For, parent.Body.Lbrace)
+		// special handling, as the init;cond;post clauses of a
+		// for-statement do not form a statement list
+		if parent.Init != nil && parent.Cond != nil && stmt == parent.Init && lineOf(parent.Cond.Pos()) == lineOf(stmt.End()) {
+			rightStmt = parent.Cond.Pos()
+		} else if parent.Post != nil && parent.Cond != nil && stmt == parent.Post && lineOf(parent.Cond.End()) == lineOf(stmt.Pos()) {
+			leftStmt = parent.Cond.End()
+		}
+	case *ast.IfStmt:
+		switch stmt {
+		case parent.Init:
+			use(parent.If, parent.Body.Lbrace)
+		case parent.Else:
+			// stmt is the {...} in "if cond {} else {...}" and removing
+			// it would require removing the 'else' keyword, but the ast
+			// does not contain its position.
+			return nil
+		}
+	case *ast.SwitchStmt:
+		use(parent.Switch, parent.Body.Lbrace)
+	case *ast.TypeSwitchStmt:
+		if stmt == parent.Assign {
+			return nil // don't remove .(type)
+		}
+		use(parent.Switch, parent.Body.Lbrace)
 	default:
 		return nil // not one of ours
 	}
-	if prev, found := curStmt.PrevSibling(); found && lineOf(prev.Node().End()) == stmtStartLine {
-		from = prev.Node().End() // preceding statement ends on same line
+	if inStmtList {
+		// find the siblings, if any, on the same line
+		if prev, found := curStmt.PrevSibling(); found && lineOf(prev.Node().End()) == stmtStartLine {
+			if _, ok := prev.Node().(ast.Stmt); ok {
+				leftStmt = prev.Node().End() // preceding statement ends on same line
+			}
+		}
+		if next, found := curStmt.NextSibling(); found && lineOf(next.Node().Pos()) == stmtEndLine {
+			rightStmt = next.Node().Pos() // following statement begins on same line
+		}
 	}
-	if next, found := curStmt.NextSibling(); found && lineOf(next.Node().Pos()) == stmtEndLine {
-		to = next.Node().Pos() // following statement begins on same line
+
+	// compute the left and right limits of the edit
+	var leftEdit, rightEdit token.Pos
+	if leftStmt.IsValid() {
+		leftEdit = stmt.Pos() // can't remove preceding comments: a()/*A*/; me()
+	} else if leftSyntax.IsValid() {
+		// remove intervening leftComments
+		if a, _, ok := filterPos(leftComments, leftSyntax, stmt.Pos()); ok {
+			leftEdit = a
+		} else {
+			leftEdit = stmt.Pos()
+		}
+	} else { // remove whole line
+		for leftEdit = stmt.Pos(); lineOf(leftEdit) == stmtStartLine; leftEdit-- {
+		}
+		if leftEdit < stmt.Pos() {
+			leftEdit++ // beginning of line
+		}
 	}
-	// and now for the comments
-Outer:
-	for _, cg := range astutil.EnclosingFile(curStmt).Comments {
-		for _, co := range cg.List {
-			if lineOf(co.End()) < stmtStartLine {
-				continue
-			} else if lineOf(co.Pos()) > stmtEndLine {
-				break Outer // no more are possible
-			}
-			if lineOf(co.End()) == stmtStartLine && co.End() < stmt.Pos() {
-				if !from.IsValid() || co.End() > from {
-					from = co.End()
-					continue // maybe there are more
-				}
-			}
-			if lineOf(co.Pos()) == stmtEndLine && co.Pos() > stmt.End() {
-				if !to.IsValid() || co.Pos() < to {
-					to = co.Pos()
-					continue // maybe there are more
-				}
-			}
+	if rightStmt.IsValid() {
+		rightEdit = stmt.End() // can't remove following comments
+	} else if rightSyntax.IsValid() {
+		// remove intervening rightComments
+		if _, b, ok := filterPos(rightComments, stmt.End(), rightSyntax); ok {
+			rightEdit = b
+		} else {
+			rightEdit = stmt.End()
+		}
+	} else { // remove whole line
+		fend := token.Pos(file.Base()) +
+			token.Pos(file.Size())
+		for rightEdit = stmt.End(); fend >= rightEdit && lineOf(rightEdit) == stmtEndLine; rightEdit++ {
+		}
+		// don't remove the \n if there was other syntax earlier on the line
+		if leftSyntax.IsValid() || leftStmt.IsValid() {
+			rightEdit--
+		}
 	}
-	// if either from or to is valid, just remove the statement
-	// otherwise remove the line
-	edit := analysis.TextEdit{Pos: stmt.Pos(), End: stmt.End()}
-	if from.IsValid() || to.IsValid() {
-		// remove just the statement.
-		// we can't tell if there is a ; or whitespace right after the statement
-		// ideally we'd like to remove the former and leave the latter
-		// (if gofmt has run, there likely won't be a ;)
-		// In type switches we know there's a semicolon somewhere after the statement,
-		// but the extra work for this special case is not worth it, as gofmt will fix it.
-		return []analysis.TextEdit{edit}
+
+	return []analysis.TextEdit{{Pos: leftEdit, End: rightEdit}}
+}
+
+// DeleteUnusedVars computes the edits required to delete the
+// declarations of any local variables whose last uses are in the
+// curDelend subtree, which is about to be deleted.
+func DeleteUnusedVars(index *typeindex.Index, info *types.Info, tokFile *token.File, curDelend inspector.Cursor) []analysis.TextEdit {
+	// TODO(adonovan): we might want to generalize this by
+	// splitting the two phases below, so that we can gather
+	// across a whole sequence of deletions then finally compute the
+	// set of variables that are no longer wanted.
+
+	// Count number of deletions of each var.
+	delcount := make(map[*types.Var]int)
+	for curId := range curDelend.Preorder((*ast.Ident)(nil)) {
+		id := curId.Node().(*ast.Ident)
+		if v, ok := info.Uses[id].(*types.Var); ok &&
+			typesinternal.GetVarKind(v) == typesinternal.LocalVar { // always false before go1.25
+			delcount[v]++
+		}
 	}
-	// remove the whole line
-	for lineOf(edit.Pos) == stmtStartLine {
-		edit.Pos--
+
+	// Delete declaration of each var that became unused.
+	var edits []analysis.TextEdit
+	for v, count := range delcount {
+		if len(slices.Collect(index.Uses(v))) == count {
+			if curDefId, ok := index.Def(v); ok {
+				edits = append(edits, DeleteVar(tokFile, info, curDefId)...)
+			}
+		}
 	}
-	edit.Pos++ // get back to stmtStartLine
-	for lineOf(edit.End) == stmtEndLine {
-		edit.End++
+	return edits
+}
+
+func eolComment(n ast.Node) *ast.CommentGroup {
+	// TODO(adonovan): support:
+	//    func f() {...} // comment
+	switch n := n.(type) {
+	case *ast.GenDecl:
+		if !n.Lparen.IsValid() && len(n.Specs) == 1 {
+			return eolComment(n.Specs[0])
+		}
+	case *ast.ValueSpec:
+		return n.Comment
+	case *ast.TypeSpec:
+		return n.Comment
 	}
-	return []analysis.TextEdit{edit}
+	return nil
 }
diff --git a/vendor/golang.org/x/tools/internal/refactor/imports.go b/vendor/golang.org/x/tools/internal/refactor/imports.go
index 1ba3a9609f..b5440d896b 100644
--- a/vendor/golang.org/x/tools/internal/refactor/imports.go
+++ b/vendor/golang.org/x/tools/internal/refactor/imports.go
@@ -14,7 +14,7 @@ import (
 	pathpkg "path"
 
 	"golang.org/x/tools/go/analysis"
-	"golang.org/x/tools/internal/analysisinternal"
+	"golang.org/x/tools/internal/packagepath"
 )
 
 // AddImport returns the prefix (either "pkg." or "") that should be
@@ -97,13 +97,13 @@ func AddImport(info *types.Info, file *ast.File, preferredName, pkgpath, member
 	}
 	if gd, ok := before.(*ast.GenDecl); ok && gd.Tok == token.IMPORT && gd.Rparen.IsValid() {
 		// Have existing grouped import ( ... ) decl.
-		if analysisinternal.IsStdPackage(pkgpath) && len(gd.Specs) > 0 {
+		if packagepath.IsStdPackage(pkgpath) && len(gd.Specs) > 0 {
 			// Add spec for a std package before
 			// first existing spec, followed by
 			// a blank line if the next one is non-std.
 			first := gd.Specs[0].(*ast.ImportSpec)
 			pos = first.Pos()
-			if !analysisinternal.IsStdPackage(first.Path.Value) {
+			if !packagepath.IsStdPackage(first.Path.Value) {
 				newText += "\n"
 			}
 			newText += "\n\t"
diff --git a/vendor/golang.org/x/tools/internal/refactor/refactor.go b/vendor/golang.org/x/tools/internal/refactor/refactor.go
index 27b9750896..26bc079808 100644
--- a/vendor/golang.org/x/tools/internal/refactor/refactor.go
+++ b/vendor/golang.org/x/tools/internal/refactor/refactor.go
@@ -17,6 +17,11 @@ import (
 
 // FreshName returns the name of an identifier that is undefined
 // at the specified position, based on the preferred name.
+//
+// TODO(adonovan): refine this to choose a fresh name only when there
+// would be a conflict with the existing declaration: it's fine to
+// redeclare a name in a narrower scope so long as there are no free
+// references to the outer name from within the narrower scope.
 func FreshName(scope *types.Scope, pos token.Pos, preferred string) string {
 	newName := preferred
 	for i := 0; ; i++ {
diff --git a/vendor/golang.org/x/tools/internal/ssainternal/ssainternal.go b/vendor/golang.org/x/tools/internal/ssainternal/ssainternal.go
new file mode 100644
index 0000000000..686c5d97d2
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/ssainternal/ssainternal.go
@@ -0,0 +1,22 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package ssainternal exposes setters for internals of go/ssa.
+// It cannot actually depend on symbols from go/ssa.
+package ssainternal
+
+import "go/types"
+
+// SetNoReturn sets the predicate used when building the ssa.Program
+// prog that reports whether a given function cannot return.
+// This may be used to prune spurious control flow edges
+// after (e.g.) log.Fatal, improving the precision of analyses.
+//
+// You must link [golang.org/x/tools/go/ssa] into your application for
+// this function to work; the default implementation panics.
+//
+// TODO(adonovan): add (*ssa.Program).SetNoReturn to the public API.
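+//
+// (Like cfginternal.IsNoReturn above, this is a link-time hook;
+// go/ssa is presumably expected to replace the panicking default
+// at init time when it is linked in.)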
+var SetNoReturn = func(prog any, noreturn func(*types.Func) bool) { + panic("golang.org/x/tools/go/ssa not linked into application") +} diff --git a/vendor/golang.org/x/tools/internal/stdlib/deps.go b/vendor/golang.org/x/tools/internal/stdlib/deps.go index 96ad6c5821..581784da43 100644 --- a/vendor/golang.org/x/tools/internal/stdlib/deps.go +++ b/vendor/golang.org/x/tools/internal/stdlib/deps.go @@ -12,354 +12,508 @@ type pkginfo struct { } var deps = [...]pkginfo{ - {"archive/tar", "\x03k\x03E;\x01\n\x01$\x01\x01\x02\x05\b\x02\x01\x02\x02\f"}, - {"archive/zip", "\x02\x04a\a\x03\x12\x021;\x01+\x05\x01\x0f\x03\x02\x0e\x04"}, - {"bufio", "\x03k\x83\x01D\x14"}, - {"bytes", "n*Y\x03\fG\x02\x02"}, + {"archive/tar", "\x03n\x03E<\x01\n\x01$\x01\x01\x02\x05\b\x02\x01\x02\x02\f"}, + {"archive/zip", "\x02\x04d\a\x03\x12\x021<\x01+\x05\x01\x0f\x03\x02\x0e\x04"}, + {"bufio", "\x03n\x84\x01D\x14"}, + {"bytes", "q*Z\x03\fG\x02\x02"}, {"cmp", ""}, - {"compress/bzip2", "\x02\x02\xed\x01A"}, - {"compress/flate", "\x02l\x03\x80\x01\f\x033\x01\x03"}, - {"compress/gzip", "\x02\x04a\a\x03\x14lT"}, - {"compress/lzw", "\x02l\x03\x80\x01"}, - {"compress/zlib", "\x02\x04a\a\x03\x12\x01m"}, - {"container/heap", "\xb3\x02"}, + {"compress/bzip2", "\x02\x02\xf1\x01A"}, + {"compress/flate", "\x02o\x03\x81\x01\f\x033\x01\x03"}, + {"compress/gzip", "\x02\x04d\a\x03\x14mT"}, + {"compress/lzw", "\x02o\x03\x81\x01"}, + {"compress/zlib", "\x02\x04d\a\x03\x12\x01n"}, + {"container/heap", "\xb7\x02"}, {"container/list", ""}, {"container/ring", ""}, - {"context", "n\\m\x01\r"}, - {"crypto", "\x83\x01nC"}, - {"crypto/aes", "\x10\n\a\x93\x02"}, - {"crypto/cipher", "\x03\x1e\x01\x01\x1e\x11\x1c+X"}, - {"crypto/des", "\x10\x13\x1e-+\x9b\x01\x03"}, - {"crypto/dsa", "A\x04)\x83\x01\r"}, - {"crypto/ecdh", "\x03\v\f\x0e\x04\x15\x04\r\x1c\x83\x01"}, - {"crypto/ecdsa", "\x0e\x05\x03\x04\x01\x0e\a\v\x05\x01\x04\f\x01\x1c\x83\x01\r\x05K\x01"}, - {"crypto/ed25519", "\x0e\x1c\x11\x06\n\a\x1c\x83\x01C"}, - {"crypto/elliptic", "0>\x83\x01\r9"}, - {"crypto/fips140", " \x05"}, - {"crypto/hkdf", "-\x13\x01-\x15"}, - {"crypto/hmac", "\x1a\x14\x12\x01\x111"}, - {"crypto/internal/boring", "\x0e\x02\rf"}, - {"crypto/internal/boring/bbig", "\x1a\xe4\x01M"}, - {"crypto/internal/boring/bcache", "\xb8\x02\x13"}, + {"context", "q[o\x01\r"}, + {"crypto", "\x86\x01oC"}, + {"crypto/aes", "\x10\n\t\x95\x02"}, + {"crypto/cipher", "\x03 \x01\x01\x1f\x11\x1c+Y"}, + {"crypto/des", "\x10\x15\x1f-+\x9c\x01\x03"}, + {"crypto/dsa", "D\x04)\x84\x01\r"}, + {"crypto/ecdh", "\x03\v\f\x10\x04\x16\x04\r\x1c\x84\x01"}, + {"crypto/ecdsa", "\x0e\x05\x03\x04\x01\x10\a\v\x06\x01\x04\f\x01\x1c\x84\x01\r\x05K\x01"}, + {"crypto/ed25519", "\x0e\x1e\x11\a\n\a\x1c\x84\x01C"}, + {"crypto/elliptic", "2?\x84\x01\r9"}, + {"crypto/fips140", "\"\x05"}, + {"crypto/hkdf", "/\x14\x01-\x15"}, + {"crypto/hmac", "\x1a\x16\x13\x01\x111"}, + {"crypto/internal/boring", "\x0e\x02\ri"}, + {"crypto/internal/boring/bbig", "\x1a\xe8\x01M"}, + {"crypto/internal/boring/bcache", "\xbc\x02\x13"}, {"crypto/internal/boring/sig", ""}, - {"crypto/internal/cryptotest", "\x03\r\n\x06$\x0e\x19\x06\x12\x12 \x04\a\t\x16\x01\x11\x11\x1b\x01\a\x05\b\x03\x05\v"}, - {"crypto/internal/entropy", "F"}, - {"crypto/internal/fips140", "?/\x15\xa7\x01\v\x16"}, - {"crypto/internal/fips140/aes", "\x03\x1d\x03\x02\x13\x05\x01\x01\x05*\x92\x014"}, - {"crypto/internal/fips140/aes/gcm", " \x01\x02\x02\x02\x11\x05\x01\x06*\x8f\x01"}, - {"crypto/internal/fips140/alias", "\xcb\x02"}, - {"crypto/internal/fips140/bigmod", 
"%\x18\x01\x06*\x92\x01"}, - {"crypto/internal/fips140/check", " \x0e\x06\t\x02\xb2\x01Z"}, - {"crypto/internal/fips140/check/checktest", "%\x85\x02!"}, - {"crypto/internal/fips140/drbg", "\x03\x1c\x01\x01\x04\x13\x05\b\x01(\x83\x01\x0f7"}, - {"crypto/internal/fips140/ecdh", "\x03\x1d\x05\x02\t\r1\x83\x01\x0f7"}, - {"crypto/internal/fips140/ecdsa", "\x03\x1d\x04\x01\x02\a\x02\x068\x15nF"}, - {"crypto/internal/fips140/ed25519", "\x03\x1d\x05\x02\x04\v8\xc6\x01\x03"}, - {"crypto/internal/fips140/edwards25519", "%\a\f\x051\x92\x017"}, - {"crypto/internal/fips140/edwards25519/field", "%\x13\x051\x92\x01"}, - {"crypto/internal/fips140/hkdf", "\x03\x1d\x05\t\x06:\x15"}, - {"crypto/internal/fips140/hmac", "\x03\x1d\x14\x01\x018\x15"}, - {"crypto/internal/fips140/mlkem", "\x03\x1d\x05\x02\x0e\x03\x051"}, - {"crypto/internal/fips140/nistec", "%\f\a\x051\x92\x01*\r\x14"}, - {"crypto/internal/fips140/nistec/fiat", "%\x136\x92\x01"}, - {"crypto/internal/fips140/pbkdf2", "\x03\x1d\x05\t\x06:\x15"}, - {"crypto/internal/fips140/rsa", "\x03\x1d\x04\x01\x02\r\x01\x01\x026\x15nF"}, - {"crypto/internal/fips140/sha256", "\x03\x1d\x1d\x01\x06*\x15}"}, - {"crypto/internal/fips140/sha3", "\x03\x1d\x18\x05\x010\x92\x01K"}, - {"crypto/internal/fips140/sha512", "\x03\x1d\x1d\x01\x06*\x15}"}, - {"crypto/internal/fips140/ssh", "%^"}, - {"crypto/internal/fips140/subtle", "#\x1a\xc3\x01"}, - {"crypto/internal/fips140/tls12", "\x03\x1d\x05\t\x06\x028\x15"}, - {"crypto/internal/fips140/tls13", "\x03\x1d\x05\b\a\t1\x15"}, - {"crypto/internal/fips140cache", "\xaa\x02\r&"}, + {"crypto/internal/constanttime", ""}, + {"crypto/internal/cryptotest", "\x03\r\n\b%\x0e\x19\x06\x12\x12 \x04\x06\t\x18\x01\x11\x11\x1b\x01\a\x05\b\x03\x05\v"}, + {"crypto/internal/entropy", "I"}, + {"crypto/internal/entropy/v1.0.0", "B/\x93\x018\x13"}, + {"crypto/internal/fips140", "A0\xbd\x01\v\x16"}, + {"crypto/internal/fips140/aes", "\x03\x1f\x03\x02\x13\x05\x01\x01\x06*\x93\x014"}, + {"crypto/internal/fips140/aes/gcm", "\"\x01\x02\x02\x02\x11\x05\x01\a*\x90\x01"}, + {"crypto/internal/fips140/alias", "\xcf\x02"}, + {"crypto/internal/fips140/bigmod", "'\x18\x01\a*\x93\x01"}, + {"crypto/internal/fips140/check", "\"\x0e\x06\t\x02\xb4\x01Z"}, + {"crypto/internal/fips140/check/checktest", "'\x87\x02!"}, + {"crypto/internal/fips140/drbg", "\x03\x1e\x01\x01\x04\x13\x05\t\x01(\x84\x01\x0f7\x01"}, + {"crypto/internal/fips140/ecdh", "\x03\x1f\x05\x02\t\r2\x84\x01\x0f7"}, + {"crypto/internal/fips140/ecdsa", "\x03\x1f\x04\x01\x02\a\x02\x069\x15oF"}, + {"crypto/internal/fips140/ed25519", "\x03\x1f\x05\x02\x04\v9\xc7\x01\x03"}, + {"crypto/internal/fips140/edwards25519", "\x1e\t\a\x112\x93\x017"}, + {"crypto/internal/fips140/edwards25519/field", "'\x13\x052\x93\x01"}, + {"crypto/internal/fips140/hkdf", "\x03\x1f\x05\t\x06;\x15"}, + {"crypto/internal/fips140/hmac", "\x03\x1f\x14\x01\x019\x15"}, + {"crypto/internal/fips140/mlkem", "\x03\x1f\x05\x02\x0e\x03\x052\xca\x01"}, + {"crypto/internal/fips140/nistec", "\x1e\t\f\f2\x93\x01*\r\x14"}, + {"crypto/internal/fips140/nistec/fiat", "'\x137\x93\x01"}, + {"crypto/internal/fips140/pbkdf2", "\x03\x1f\x05\t\x06;\x15"}, + {"crypto/internal/fips140/rsa", "\x03\x1b\x04\x04\x01\x02\r\x01\x01\x027\x15oF"}, + {"crypto/internal/fips140/sha256", "\x03\x1f\x1d\x01\a*\x15~"}, + {"crypto/internal/fips140/sha3", "\x03\x1f\x18\x05\x011\x93\x01K"}, + {"crypto/internal/fips140/sha512", "\x03\x1f\x1d\x01\a*\x15~"}, + {"crypto/internal/fips140/ssh", "'_"}, + {"crypto/internal/fips140/subtle", "\x1e\a\x1a\xc5\x01"}, + 
{"crypto/internal/fips140/tls12", "\x03\x1f\x05\t\x06\x029\x15"}, + {"crypto/internal/fips140/tls13", "\x03\x1f\x05\b\a\t2\x15"}, + {"crypto/internal/fips140cache", "\xae\x02\r&"}, {"crypto/internal/fips140deps", ""}, - {"crypto/internal/fips140deps/byteorder", "\x99\x01"}, - {"crypto/internal/fips140deps/cpu", "\xae\x01\a"}, - {"crypto/internal/fips140deps/godebug", "\xb6\x01"}, - {"crypto/internal/fips140hash", "5\x1b3\xc8\x01"}, - {"crypto/internal/fips140only", "'\r\x01\x01M3;"}, + {"crypto/internal/fips140deps/byteorder", "\x9c\x01"}, + {"crypto/internal/fips140deps/cpu", "\xb1\x01\a"}, + {"crypto/internal/fips140deps/godebug", "\xb9\x01"}, + {"crypto/internal/fips140deps/time", "\xc9\x02"}, + {"crypto/internal/fips140hash", "7\x1c3\xc9\x01"}, + {"crypto/internal/fips140only", ")\r\x01\x01N3<"}, {"crypto/internal/fips140test", ""}, - {"crypto/internal/hpke", "\x0e\x01\x01\x03\x053#+gM"}, - {"crypto/internal/impl", "\xb5\x02"}, - {"crypto/internal/randutil", "\xf1\x01\x12"}, - {"crypto/internal/sysrand", "nn! \r\r\x01\x01\f\x06"}, - {"crypto/internal/sysrand/internal/seccomp", "n"}, - {"crypto/md5", "\x0e3-\x15\x16g"}, - {"crypto/mlkem", "/"}, - {"crypto/pbkdf2", "2\x0e\x01-\x15"}, - {"crypto/rand", "\x1a\x06\a\x1a\x04\x01(\x83\x01\rM"}, - {"crypto/rc4", "#\x1e-\xc6\x01"}, - {"crypto/rsa", "\x0e\f\x01\t\x0f\r\x01\x04\x06\a\x1c\x03\x123;\f\x01"}, - {"crypto/sha1", "\x0e\f'\x03*\x15\x16\x15R"}, - {"crypto/sha256", "\x0e\f\x1aO"}, - {"crypto/sha3", "\x0e'N\xc8\x01"}, - {"crypto/sha512", "\x0e\f\x1cM"}, - {"crypto/subtle", "8\x9b\x01W"}, - {"crypto/tls", "\x03\b\x02\x01\x01\x01\x01\x02\x01\x01\x01\x02\x01\x01\a\x01\r\n\x01\t\x05\x03\x01\x01\x01\x01\x02\x01\x02\x01\x17\x02\x03\x12\x16\x15\b;\x16\x16\r\b\x01\x01\x01\x02\x01\r\x06\x02\x01\x0f"}, - {"crypto/tls/internal/fips140tls", "\x17\xa1\x02"}, - {"crypto/x509", "\x03\v\x01\x01\x01\x01\x01\x01\x01\x012\x05\x01\x01\x02\x05\x0e\x06\x02\x02\x03E\x038\x01\x02\b\x01\x01\x02\a\x10\x05\x01\x06\x02\x05\n\x01\x02\x0e\x02\x01\x01\x02\x03\x01"}, - {"crypto/x509/pkix", "d\x06\a\x8d\x01G"}, - {"database/sql", "\x03\nK\x16\x03\x80\x01\v\a\"\x05\b\x02\x03\x01\r\x02\x02\x02"}, - {"database/sql/driver", "\ra\x03\xb4\x01\x0f\x11"}, - {"debug/buildinfo", "\x03X\x02\x01\x01\b\a\x03e\x19\x02\x01+\x0f\x1f"}, - {"debug/dwarf", "\x03d\a\x03\x80\x011\x11\x01\x01"}, - {"debug/elf", "\x03\x06Q\r\a\x03e\x1a\x01,\x17\x01\x16"}, - {"debug/gosym", "\x03d\n\xc2\x01\x01\x01\x02"}, - {"debug/macho", "\x03\x06Q\r\ne\x1b,\x17\x01"}, - {"debug/pe", "\x03\x06Q\r\a\x03e\x1b,\x17\x01\x16"}, - {"debug/plan9obj", "g\a\x03e\x1b,"}, - {"embed", "n*@\x19\x01S"}, + {"crypto/internal/hpke", "\x0e\x01\x01\x03\x056#+hM"}, + {"crypto/internal/impl", "\xb9\x02"}, + {"crypto/internal/randutil", "\xf5\x01\x12"}, + {"crypto/internal/sysrand", "qo! 
\r\r\x01\x01\f\x06"}, + {"crypto/internal/sysrand/internal/seccomp", "q"}, + {"crypto/md5", "\x0e6-\x15\x16h"}, + {"crypto/mlkem", "1"}, + {"crypto/pbkdf2", "4\x0f\x01-\x15"}, + {"crypto/rand", "\x1a\b\a\x1b\x04\x01(\x84\x01\rM"}, + {"crypto/rc4", "%\x1f-\xc7\x01"}, + {"crypto/rsa", "\x0e\f\x01\v\x0f\x0e\x01\x04\x06\a\x1c\x03\x123<\f\x01"}, + {"crypto/sha1", "\x0e\f*\x03*\x15\x16\x15S"}, + {"crypto/sha256", "\x0e\f\x1cP"}, + {"crypto/sha3", "\x0e)O\xc9\x01"}, + {"crypto/sha512", "\x0e\f\x1eN"}, + {"crypto/subtle", "\x1e\x1c\x9c\x01X"}, + {"crypto/tls", "\x03\b\x02\x01\x01\x01\x01\x02\x01\x01\x01\x02\x01\x01\t\x01\r\n\x01\n\x05\x03\x01\x01\x01\x01\x02\x01\x02\x01\x17\x02\x03\x12\x16\x15\b<\x16\x16\r\b\x01\x01\x01\x02\x01\r\x06\x02\x01\x0f"}, + {"crypto/tls/internal/fips140tls", "\x17\xa5\x02"}, + {"crypto/x509", "\x03\v\x01\x01\x01\x01\x01\x01\x01\x015\x05\x01\x01\x02\x05\x0e\x06\x02\x02\x03E\x039\x01\x02\b\x01\x01\x02\a\x10\x05\x01\x06\x02\x05\b\x02\x01\x02\x0e\x02\x01\x01\x02\x03\x01"}, + {"crypto/x509/pkix", "g\x06\a\x8e\x01G"}, + {"database/sql", "\x03\nN\x16\x03\x81\x01\v\a\"\x05\b\x02\x03\x01\r\x02\x02\x02"}, + {"database/sql/driver", "\rd\x03\xb5\x01\x0f\x11"}, + {"debug/buildinfo", "\x03[\x02\x01\x01\b\a\x03e\x1a\x02\x01+\x0f\x1f"}, + {"debug/dwarf", "\x03g\a\x03\x81\x011\x11\x01\x01"}, + {"debug/elf", "\x03\x06T\r\a\x03e\x1b\x01\f \x17\x01\x16"}, + {"debug/gosym", "\x03g\n\xc3\x01\x01\x01\x02"}, + {"debug/macho", "\x03\x06T\r\ne\x1c,\x17\x01"}, + {"debug/pe", "\x03\x06T\r\a\x03e\x1c,\x17\x01\x16"}, + {"debug/plan9obj", "j\a\x03e\x1c,"}, + {"embed", "q*A\x19\x01S"}, {"embed/internal/embedtest", ""}, {"encoding", ""}, - {"encoding/ascii85", "\xf1\x01C"}, - {"encoding/asn1", "\x03k\x03\x8c\x01\x01'\r\x02\x01\x10\x03\x01"}, - {"encoding/base32", "\xf1\x01A\x02"}, - {"encoding/base64", "\x99\x01XA\x02"}, - {"encoding/binary", "n\x83\x01\f(\r\x05"}, - {"encoding/csv", "\x02\x01k\x03\x80\x01D\x12\x02"}, - {"encoding/gob", "\x02`\x05\a\x03e\x1b\v\x01\x03\x1d\b\x12\x01\x0f\x02"}, - {"encoding/hex", "n\x03\x80\x01A\x03"}, - {"encoding/json", "\x03\x01^\x04\b\x03\x80\x01\f(\r\x02\x01\x02\x10\x01\x01\x02"}, - {"encoding/pem", "\x03c\b\x83\x01A\x03"}, - {"encoding/xml", "\x02\x01_\f\x03\x80\x014\x05\n\x01\x02\x10\x02"}, - {"errors", "\xca\x01\x81\x01"}, - {"expvar", "kK?\b\v\x15\r\b\x02\x03\x01\x11"}, - {"flag", "b\f\x03\x80\x01,\b\x05\b\x02\x01\x10"}, - {"fmt", "nE>\f \b\r\x02\x03\x12"}, - {"go/ast", "\x03\x01m\x0e\x01q\x03)\b\r\x02\x01"}, - {"go/build", "\x02\x01k\x03\x01\x02\x02\a\x02\x01\x17\x1f\x04\x02\t\x19\x13\x01+\x01\x04\x01\a\b\x02\x01\x12\x02\x02"}, - {"go/build/constraint", "n\xc6\x01\x01\x12\x02"}, - {"go/constant", "q\x0f}\x01\x024\x01\x02\x12"}, - {"go/doc", "\x04m\x01\x05\t>31\x10\x02\x01\x12\x02"}, - {"go/doc/comment", "\x03n\xc1\x01\x01\x01\x01\x12\x02"}, - {"go/format", "\x03n\x01\v\x01\x02qD"}, - {"go/importer", "s\a\x01\x01\x04\x01p9"}, - {"go/internal/gccgoimporter", "\x02\x01X\x13\x03\x04\v\x01n\x02,\x01\x05\x11\x01\f\b"}, - {"go/internal/gcimporter", "\x02o\x0f\x010\x05\x0e-,\x15\x03\x02"}, - {"go/internal/srcimporter", "q\x01\x01\n\x03\x01p,\x01\x05\x12\x02\x14"}, - {"go/parser", "\x03k\x03\x01\x02\v\x01q\x01+\x06\x12"}, - {"go/printer", "q\x01\x02\x03\tq\f \x15\x02\x01\x02\v\x05\x02"}, - {"go/scanner", "\x03n\x0fq2\x10\x01\x13\x02"}, - {"go/token", "\x04m\x83\x01>\x02\x03\x01\x0f\x02"}, - {"go/types", "\x03\x01\x06d\x03\x01\x03\b\x03\x02\x15\x1f\x061\x04\x03\t \x06\a\b\x01\x01\x01\x02\x01\x0f\x02\x02"}, - {"go/version", "\xbb\x01z"}, - {"hash", "\xf1\x01"}, - 
{"hash/adler32", "n\x15\x16"}, - {"hash/crc32", "n\x15\x16\x15\x89\x01\x01\x13"}, - {"hash/crc64", "n\x15\x16\x9e\x01"}, - {"hash/fnv", "n\x15\x16g"}, - {"hash/maphash", "\x83\x01\x11!\x03\x93\x01"}, - {"html", "\xb5\x02\x02\x12"}, - {"html/template", "\x03h\x06\x18-;\x01\n!\x05\x01\x02\x03\f\x01\x02\f\x01\x03\x02"}, - {"image", "\x02l\x1ee\x0f4\x03\x01"}, + {"encoding/ascii85", "\xf5\x01C"}, + {"encoding/asn1", "\x03n\x03e(\x01'\r\x02\x01\x10\x03\x01"}, + {"encoding/base32", "\xf5\x01A\x02"}, + {"encoding/base64", "\x9c\x01YA\x02"}, + {"encoding/binary", "q\x84\x01\f(\r\x05"}, + {"encoding/csv", "\x02\x01n\x03\x81\x01D\x12\x02"}, + {"encoding/gob", "\x02c\x05\a\x03e\x1c\v\x01\x03\x1d\b\x12\x01\x0f\x02"}, + {"encoding/hex", "q\x03\x81\x01A\x03"}, + {"encoding/json", "\x03\x01a\x04\b\x03\x81\x01\f(\r\x02\x01\x02\x10\x01\x01\x02"}, + {"encoding/pem", "\x03f\b\x84\x01A\x03"}, + {"encoding/xml", "\x02\x01b\f\x03\x81\x014\x05\n\x01\x02\x10\x02"}, + {"errors", "\xcc\x01\x83\x01"}, + {"expvar", "nK@\b\v\x15\r\b\x02\x03\x01\x11"}, + {"flag", "e\f\x03\x81\x01,\b\x05\b\x02\x01\x10"}, + {"fmt", "qE&\x19\f \b\r\x02\x03\x12"}, + {"go/ast", "\x03\x01p\x0e\x01r\x03)\b\r\x02\x01\x12\x02"}, + {"go/build", "\x02\x01n\x03\x01\x02\x02\a\x02\x01\x17\x1f\x04\x02\b\x1b\x13\x01+\x01\x04\x01\a\b\x02\x01\x12\x02\x02"}, + {"go/build/constraint", "q\xc7\x01\x01\x12\x02"}, + {"go/constant", "t\x0f~\x01\x024\x01\x02\x12"}, + {"go/doc", "\x04p\x01\x05\t=51\x10\x02\x01\x12\x02"}, + {"go/doc/comment", "\x03q\xc2\x01\x01\x01\x01\x12\x02"}, + {"go/format", "\x03q\x01\v\x01\x02rD"}, + {"go/importer", "v\a\x01\x01\x04\x01q9"}, + {"go/internal/gccgoimporter", "\x02\x01[\x13\x03\x04\v\x01o\x02,\x01\x05\x11\x01\f\b"}, + {"go/internal/gcimporter", "\x02r\x0f\x010\x05\r/,\x15\x03\x02"}, + {"go/internal/srcimporter", "t\x01\x01\n\x03\x01q,\x01\x05\x12\x02\x14"}, + {"go/parser", "\x03n\x03\x01\x02\v\x01r\x01+\x06\x12"}, + {"go/printer", "t\x01\x02\x03\tr\f \x15\x02\x01\x02\v\x05\x02"}, + {"go/scanner", "\x03q\x0fr2\x10\x01\x13\x02"}, + {"go/token", "\x04p\x84\x01>\x02\x03\x01\x0f\x02"}, + {"go/types", "\x03\x01\x06g\x03\x01\x03\b\x03\x024\x062\x04\x03\t \x06\a\b\x01\x01\x01\x02\x01\x0f\x02\x02"}, + {"go/version", "\xbe\x01{"}, + {"hash", "\xf5\x01"}, + {"hash/adler32", "q\x15\x16"}, + {"hash/crc32", "q\x15\x16\x15\x8a\x01\x01\x13"}, + {"hash/crc64", "q\x15\x16\x9f\x01"}, + {"hash/fnv", "q\x15\x16h"}, + {"hash/maphash", "\x86\x01\x11<|"}, + {"html", "\xb9\x02\x02\x12"}, + {"html/template", "\x03k\x06\x18-<\x01\n!\x05\x01\x02\x03\f\x01\x02\f\x01\x03\x02"}, + {"image", "\x02o\x1ef\x0f4\x03\x01"}, {"image/color", ""}, - {"image/color/palette", "\x8c\x01"}, - {"image/draw", "\x8b\x01\x01\x04"}, - {"image/gif", "\x02\x01\x05f\x03\x1a\x01\x01\x01\vX"}, - {"image/internal/imageutil", "\x8b\x01"}, - {"image/jpeg", "\x02l\x1d\x01\x04a"}, - {"image/png", "\x02\a^\n\x12\x02\x06\x01eC"}, - {"index/suffixarray", "\x03d\a\x83\x01\f+\n\x01"}, - {"internal/abi", "\xb5\x01\x96\x01"}, - {"internal/asan", "\xcb\x02"}, - {"internal/bisect", "\xaa\x02\r\x01"}, - {"internal/buildcfg", "qGe\x06\x02\x05\n\x01"}, - {"internal/bytealg", "\xae\x01\x9d\x01"}, + {"image/color/palette", "\x8f\x01"}, + {"image/draw", "\x8e\x01\x01\x04"}, + {"image/gif", "\x02\x01\x05i\x03\x1a\x01\x01\x01\vY"}, + {"image/internal/imageutil", "\x8e\x01"}, + {"image/jpeg", "\x02o\x1d\x01\x04b"}, + {"image/png", "\x02\aa\n\x12\x02\x06\x01fC"}, + {"index/suffixarray", "\x03g\a\x84\x01\f+\n\x01"}, + {"internal/abi", "\xb8\x01\x97\x01"}, + {"internal/asan", "\xcf\x02"}, + 
{"internal/bisect", "\xae\x02\r\x01"}, + {"internal/buildcfg", "tGf\x06\x02\x05\n\x01"}, + {"internal/bytealg", "\xb1\x01\x9e\x01"}, {"internal/byteorder", ""}, {"internal/cfg", ""}, - {"internal/cgrouptest", "q[Q\x06\x0f\x02\x01\x04\x01"}, - {"internal/chacha8rand", "\x99\x01\x15\a\x96\x01"}, + {"internal/cgrouptest", "tZS\x06\x0f\x02\x01\x04\x01"}, + {"internal/chacha8rand", "\x9c\x01\x15\a\x97\x01"}, {"internal/copyright", ""}, {"internal/coverage", ""}, {"internal/coverage/calloc", ""}, - {"internal/coverage/cfile", "k\x06\x16\x17\x01\x02\x01\x01\x01\x01\x01\x01\x01#\x02$,\x06\a\n\x01\x03\r\x06"}, - {"internal/coverage/cformat", "\x04m-\x04O\v6\x01\x02\r"}, - {"internal/coverage/cmerge", "q-_"}, - {"internal/coverage/decodecounter", "g\n-\v\x02F,\x17\x17"}, - {"internal/coverage/decodemeta", "\x02e\n\x16\x17\v\x02F,"}, - {"internal/coverage/encodecounter", "\x02e\n-\f\x01\x02D\v!\x15"}, - {"internal/coverage/encodemeta", "\x02\x01d\n\x12\x04\x17\r\x02D,."}, - {"internal/coverage/pods", "\x04m-\x7f\x06\x05\n\x02\x01"}, - {"internal/coverage/rtcov", "\xcb\x02"}, - {"internal/coverage/slicereader", "g\n\x80\x01Z"}, - {"internal/coverage/slicewriter", "q\x80\x01"}, - {"internal/coverage/stringtab", "q8\x04D"}, + {"internal/coverage/cfile", "n\x06\x16\x17\x01\x02\x01\x01\x01\x01\x01\x01\x01\"\x02&,\x06\a\n\x01\x03\r\x06"}, + {"internal/coverage/cformat", "\x04p-\x04P\v6\x01\x02\r"}, + {"internal/coverage/cmerge", "t-`"}, + {"internal/coverage/decodecounter", "j\n-\v\x02G,\x17\x17"}, + {"internal/coverage/decodemeta", "\x02h\n\x16\x17\v\x02G,"}, + {"internal/coverage/encodecounter", "\x02h\n-\f\x01\x02E\v!\x15"}, + {"internal/coverage/encodemeta", "\x02\x01g\n\x12\x04\x17\r\x02E,."}, + {"internal/coverage/pods", "\x04p-\x80\x01\x06\x05\n\x02\x01"}, + {"internal/coverage/rtcov", "\xcf\x02"}, + {"internal/coverage/slicereader", "j\n\x81\x01Z"}, + {"internal/coverage/slicewriter", "t\x81\x01"}, + {"internal/coverage/stringtab", "t8\x04E"}, {"internal/coverage/test", ""}, {"internal/coverage/uleb128", ""}, - {"internal/cpu", "\xcb\x02"}, - {"internal/dag", "\x04m\xc1\x01\x03"}, - {"internal/diff", "\x03n\xc2\x01\x02"}, - {"internal/exportdata", "\x02\x01k\x03\x02c\x1b,\x01\x05\x11\x01\x02"}, - {"internal/filepathlite", "n*@\x1a@"}, - {"internal/fmtsort", "\x04\xa1\x02\r"}, - {"internal/fuzz", "\x03\nB\x18\x04\x03\x03\x01\v\x036;\f\x03\x1d\x01\x05\x02\x05\n\x01\x02\x01\x01\f\x04\x02"}, + {"internal/cpu", "\xcf\x02"}, + {"internal/dag", "\x04p\xc2\x01\x03"}, + {"internal/diff", "\x03q\xc3\x01\x02"}, + {"internal/exportdata", "\x02\x01n\x03\x02c\x1c,\x01\x05\x11\x01\x02"}, + {"internal/filepathlite", "q*A\x1a@"}, + {"internal/fmtsort", "\x04\xa5\x02\r"}, + {"internal/fuzz", "\x03\nE\x18\x04\x03\x03\x01\v\x036<\f\x03\x1d\x01\x05\x02\x05\n\x01\x02\x01\x01\f\x04\x02"}, {"internal/goarch", ""}, - {"internal/godebug", "\x96\x01!\x80\x01\x01\x13"}, + {"internal/godebug", "\x99\x01!\x81\x01\x01\x13"}, {"internal/godebugs", ""}, {"internal/goexperiment", ""}, {"internal/goos", ""}, - {"internal/goroot", "\x9d\x02\x01\x05\x12\x02"}, + {"internal/goroot", "\xa1\x02\x01\x05\x12\x02"}, {"internal/gover", "\x04"}, {"internal/goversion", ""}, - {"internal/itoa", ""}, - {"internal/lazyregexp", "\x9d\x02\v\r\x02"}, - {"internal/lazytemplate", "\xf1\x01,\x18\x02\f"}, - {"internal/msan", "\xcb\x02"}, + {"internal/lazyregexp", "\xa1\x02\v\r\x02"}, + {"internal/lazytemplate", "\xf5\x01,\x18\x02\f"}, + {"internal/msan", "\xcf\x02"}, {"internal/nettrace", ""}, - {"internal/obscuretestdata", "f\x8b\x01,"}, - 
{"internal/oserror", "n"}, - {"internal/pkgbits", "\x03L\x18\a\x03\x04\vq\r\x1f\r\n\x01"}, + {"internal/obscuretestdata", "i\x8c\x01,"}, + {"internal/oserror", "q"}, + {"internal/pkgbits", "\x03O\x18\a\x03\x04\vr\r\x1f\r\n\x01"}, {"internal/platform", ""}, - {"internal/poll", "nO\x1f\x159\r\x01\x01\f\x06"}, - {"internal/profile", "\x03\x04g\x03\x80\x017\v\x01\x01\x10"}, + {"internal/poll", "qj\x05\x159\r\x01\x01\f\x06"}, + {"internal/profile", "\x03\x04j\x03\x81\x017\n\x01\x01\x01\x10"}, {"internal/profilerecord", ""}, - {"internal/race", "\x94\x01\xb7\x01"}, - {"internal/reflectlite", "\x94\x01!9\b\x13\x01\a\x03E;\x01\x03\a\x01\x03\x02\x02\x01\x02\x06\x02\x01\x01\n\x01\x01\x05\x01\x02\x05\b\x01\x01\x01\x02\x01\r\x02\x02\x02\b\x01\x01\x01"}, - {"net/http/cgi", "\x02Q\x1b\x03\x80\x01\x04\a\v\x01\x13\x01\x01\x01\x04\x01\x05\x02\b\x02\x01\x10\x0e"}, - {"net/http/cookiejar", "\x04j\x03\x96\x01\x01\b\f\x16\x03\x02\x0e\x04"}, - {"net/http/fcgi", "\x02\x01\nZ\a\x03\x80\x01\x16\x01\x01\x14\x18\x02\x0e"}, - {"net/http/httptest", "\x02\x01\nF\x02\x1b\x01\x80\x01\x04\x12\x01\n\t\x02\x17\x01\x02\x0e\x0e"}, - {"net/http/httptrace", "\rFnF\x14\n "}, - {"net/http/httputil", "\x02\x01\na\x03\x80\x01\x04\x0f\x03\x01\x05\x02\x01\v\x01\x19\x02\x0e\x0e"}, - {"net/http/internal", "\x02\x01k\x03\x80\x01"}, - {"net/http/internal/ascii", "\xb5\x02\x12"}, - {"net/http/internal/httpcommon", "\ra\x03\x9c\x01\x0e\x01\x17\x01\x01\x02\x1c\x02"}, - {"net/http/internal/testcert", "\xb5\x02"}, - {"net/http/pprof", "\x02\x01\nd\x18-\x11*\x04\x13\x14\x01\r\x04\x03\x01\x02\x01\x10"}, + {"log/slog/internal/benchmarks", "\rd\x03\x81\x01\x06\x03:\x11"}, + {"log/slog/internal/buffer", "\xbb\x02"}, + {"log/syslog", "q\x03\x85\x01\x12\x16\x18\x02\x0e"}, + {"maps", "\xf8\x01W"}, + {"math", "\xb1\x01SK"}, + {"math/big", "\x03n\x03(\x15D\f\x03\x020\x02\x01\x02\x14"}, + {"math/big/internal/asmgen", "\x03\x01p\x90\x012\x03"}, + {"math/bits", "\xcf\x02"}, + {"math/cmplx", "\x81\x02\x03"}, + {"math/rand", "\xb9\x01H:\x01\x13"}, + {"math/rand/v2", "q+\x03b\x03K"}, + {"mime", "\x02\x01f\b\x03\x81\x01\v!\x15\x03\x02\x10\x02"}, + {"mime/multipart", "\x02\x01K#\x03E<\v\x01\a\x02\x15\x02\x06\x0f\x02\x01\x16"}, + {"mime/quotedprintable", "\x02\x01q\x81\x01"}, + {"net", "\x04\td*\x1e\n\x05\x12\x01\x01\x04\x15\x01%\x06\r\b\x05\x01\x01\f\x06\a"}, + {"net/http", "\x02\x01\x03\x01\x04\x02A\b\x13\x01\a\x03E<\x01\x03\a\x01\x03\x02\x02\x01\x02\x06\x02\x01\x01\n\x01\x01\x05\x01\x02\x05\b\x01\x01\x01\x02\x01\r\x02\x02\x02\b\x01\x01\x01"}, + {"net/http/cgi", "\x02T\x1b\x03\x81\x01\x04\a\v\x01\x13\x01\x01\x01\x04\x01\x05\x02\b\x02\x01\x10\x0e"}, + {"net/http/cookiejar", "\x04m\x03\x97\x01\x01\b\f\x16\x03\x02\x0e\x04"}, + {"net/http/fcgi", "\x02\x01\n]\a\x03\x81\x01\x16\x01\x01\x14\x18\x02\x0e"}, + {"net/http/httptest", "\x02\x01\nI\x02\x1b\x01\x81\x01\x04\x12\x01\n\t\x02\x17\x01\x02\x0e\x0e"}, + {"net/http/httptrace", "\rImH\x14\n "}, + {"net/http/httputil", "\x02\x01\nd\x03\x81\x01\x04\x0f\x03\x01\x05\x02\x01\v\x01\x19\x02\x0e\x0e"}, + {"net/http/internal", "\x02\x01n\x03\x81\x01"}, + {"net/http/internal/ascii", "\xb9\x02\x12"}, + {"net/http/internal/httpcommon", "\rd\x03\x9d\x01\x0e\x01\x17\x01\x01\x02\x1c\x02"}, + {"net/http/internal/testcert", "\xb9\x02"}, + {"net/http/pprof", "\x02\x01\ng\x18-\x02\x0e,\x04\x13\x14\x01\r\x04\x03\x01\x02\x01\x10"}, {"net/internal/cgotest", ""}, - {"net/internal/socktest", "q\xc6\x01\x02"}, - {"net/mail", "\x02l\x03\x80\x01\x04\x0f\x03\x14\x1a\x02\x0e\x04"}, - {"net/netip", "\x04j*\x01$@\x034\x16"}, - {"net/rpc", 
"\x02g\x05\x03\x0f\ng\x04\x12\x01\x1d\r\x03\x02"}, - {"net/rpc/jsonrpc", "k\x03\x03\x80\x01\x16\x11\x1f"}, - {"net/smtp", "\x19/\v\x13\b\x03\x80\x01\x16\x14\x1a"}, - {"net/textproto", "\x02\x01k\x03\x80\x01\f\n-\x01\x02\x14"}, - {"net/url", "n\x03\x8b\x01&\x10\x02\x01\x16"}, - {"os", "n*\x01\x19\x03\b\t\x12\x03\x01\x05\x10\x018\b\x05\x01\x01\f\x06"}, - {"os/exec", "\x03\naH%\x01\x15\x01+\x06\a\n\x01\x04\f"}, - {"os/exec/internal/fdtest", "\xb9\x02"}, - {"os/signal", "\r\x90\x02\x15\x05\x02"}, - {"os/user", "\x02\x01k\x03\x80\x01,\r\n\x01\x02"}, - {"path", "n*\xb1\x01"}, - {"path/filepath", "n*\x1a@+\r\b\x03\x04\x10"}, - {"plugin", "n"}, - {"reflect", "n&\x04\x1d\b\f\x06\x04\x1b\x06\t-\n\x03\x10\x02\x02"}, + {"net/internal/socktest", "t\xc7\x01\x02"}, + {"net/mail", "\x02o\x03\x81\x01\x04\x0f\x03\x14\x1a\x02\x0e\x04"}, + {"net/netip", "\x04m*\x01e\x034\x16"}, + {"net/rpc", "\x02j\x05\x03\x0f\nh\x04\x12\x01\x1d\r\x03\x02"}, + {"net/rpc/jsonrpc", "n\x03\x03\x81\x01\x16\x11\x1f"}, + {"net/smtp", "\x192\v\x13\b\x03\x81\x01\x16\x14\x1a"}, + {"net/textproto", "\x02\x01n\x03\x81\x01\f\n-\x01\x02\x14"}, + {"net/url", "q\x03\xa7\x01\v\x10\x02\x01\x16"}, + {"os", "q*\x01\x19\x03\x10\x13\x01\x03\x01\x05\x10\x018\b\x05\x01\x01\f\x06"}, + {"os/exec", "\x03\ndH&\x01\x15\x01+\x06\a\n\x01\x04\f"}, + {"os/exec/internal/fdtest", "\xbd\x02"}, + {"os/signal", "\r\x94\x02\x15\x05\x02"}, + {"os/user", "\x02\x01n\x03\x81\x01,\r\n\x01\x02"}, + {"path", "q*\xb2\x01"}, + {"path/filepath", "q*\x1aA+\r\b\x03\x04\x10"}, + {"plugin", "q"}, + {"reflect", "q&\x04\x1d\x13\b\x03\x05\x17\x06\t-\n\x03\x10\x02\x02"}, {"reflect/internal/example1", ""}, {"reflect/internal/example2", ""}, - {"regexp", "\x03\xee\x018\t\x02\x01\x02\x10\x02"}, - {"regexp/syntax", "\xb2\x02\x01\x01\x01\x02\x10\x02"}, - {"runtime", "\x94\x01\x04\x01\x03\f\x06\a\x02\x01\x01\x0f\x03\x01\x01\x01\x01\x01\x02\x01\x01\x04\x10c"}, - {"runtime/coverage", "\xa0\x01Q"}, - {"runtime/debug", "qUW\r\b\x02\x01\x10\x06"}, - {"runtime/metrics", "\xb7\x01F-!"}, - {"runtime/pprof", "\x02\x01\x01\x03\x06Z\a\x03#4)\f \r\b\x01\x01\x01\x02\x02\t\x03\x06"}, - {"runtime/race", "\xb0\x02"}, + {"regexp", "\x03\xf2\x018\t\x02\x01\x02\x10\x02"}, + {"regexp/syntax", "\xb6\x02\x01\x01\x01\x02\x10\x02"}, + {"runtime", "\x97\x01\x04\x01\x03\f\x06\a\x02\x01\x01\x0e\x03\x01\x01\x01\x02\x01\x01\x02\x01\x04\x01\x10c"}, + {"runtime/coverage", "\xa3\x01R"}, + {"runtime/debug", "tTY\r\b\x02\x01\x10\x06"}, + {"runtime/metrics", "\xba\x01G-!"}, + {"runtime/pprof", "\x02\x01\x01\x03\x06]\a\x03#$\x0f+\f \r\b\x01\x01\x01\x02\x02\t\x03\x06"}, + {"runtime/race", "\xb4\x02"}, {"runtime/race/internal/amd64v1", ""}, - {"runtime/trace", "\ra\x03w\t9\b\x05\x01\r\x06"}, - {"slices", "\x04\xf0\x01\fK"}, - {"sort", "\xca\x0162"}, - {"strconv", "n*@%\x03I"}, - {"strings", "n&\x04@\x19\x03\f7\x10\x02\x02"}, + {"runtime/trace", "\rd\x03x\t9\b\x05\x01\r\x06"}, + {"slices", "\x04\xf4\x01\fK"}, + {"sort", "\xcc\x0182"}, + {"strconv", "q*@\x01q"}, + {"strings", "q&\x04A\x19\x03\f7\x10\x02\x02"}, {"structs", ""}, - {"sync", "\xc9\x01\x10\x01P\x0e\x13"}, - {"sync/atomic", "\xcb\x02"}, - {"syscall", "n'\x03\x01\x1c\b\x03\x03\x06\vV\b\x05\x01\x13"}, - {"testing", "\x03\na\x02\x01X\x14\x14\f\x05\x1b\x06\x02\x05\x02\x05\x01\x02\x01\x02\x01\r\x02\x02\x02"}, - {"testing/fstest", "n\x03\x80\x01\x01\n&\x10\x03\b\b"}, - {"testing/internal/testdeps", "\x02\v\xa7\x01-\x10,\x03\x05\x03\x06\a\x02\x0e"}, - {"testing/iotest", "\x03k\x03\x80\x01\x04"}, - {"testing/quick", "p\x01\x8c\x01\x05#\x10\x10"}, - {"testing/slogtest", 
"\ra\x03\x86\x01.\x05\x10\v"}, - {"testing/synctest", "\xda\x01`\x11"}, - {"text/scanner", "\x03n\x80\x01,*\x02"}, - {"text/tabwriter", "q\x80\x01X"}, - {"text/template", "n\x03B>\x01\n \x01\x05\x01\x02\x05\v\x02\r\x03\x02"}, - {"text/template/parse", "\x03n\xb9\x01\n\x01\x12\x02"}, - {"time", "n*\x1e\"(*\r\x02\x12"}, - {"time/tzdata", "n\xcb\x01\x12"}, + {"sync", "\xcb\x01\x12\x01P\x0e\x13"}, + {"sync/atomic", "\xcf\x02"}, + {"syscall", "q'\x03\x01\x1c\n\x03\x06\f\x04S\b\x05\x01\x13"}, + {"testing", "\x03\nd\x02\x01W\x16\x14\f\x05\x1b\x06\x02\x05\x02\x05\x01\x02\x01\x02\x01\r\x02\x04"}, + {"testing/fstest", "q\x03\x81\x01\x01\n&\x10\x03\b\b"}, + {"testing/internal/testdeps", "\x02\v\xaa\x01.\x10,\x03\x05\x03\x06\a\x02\x0e"}, + {"testing/iotest", "\x03n\x03\x81\x01\x04"}, + {"testing/quick", "s\x01\x8d\x01\x05#\x10\x10"}, + {"testing/slogtest", "\rd\x03\x87\x01.\x05\x10\v"}, + {"testing/synctest", "\xde\x01`\x11"}, + {"text/scanner", "\x03q\x81\x01,*\x02"}, + {"text/tabwriter", "t\x81\x01X"}, + {"text/template", "q\x03B?\x01\n \x01\x05\x01\x02\x05\v\x02\r\x03\x02"}, + {"text/template/parse", "\x03q\xba\x01\n\x01\x12\x02"}, + {"time", "q*\x1e#(*\r\x02\x12"}, + {"time/tzdata", "q\xcc\x01\x12"}, {"unicode", ""}, {"unicode/utf16", ""}, {"unicode/utf8", ""}, - {"unique", "\x94\x01!#\x01Q\r\x01\x13\x12"}, + {"unique", "\x97\x01!$\x01Q\r\x01\x13\x12"}, {"unsafe", ""}, - {"vendor/golang.org/x/crypto/chacha20", "\x10W\a\x92\x01*&"}, - {"vendor/golang.org/x/crypto/chacha20poly1305", "\x10W\a\xde\x01\x04\x01\a"}, - {"vendor/golang.org/x/crypto/cryptobyte", "d\n\x03\x8d\x01' \n"}, + {"vendor/golang.org/x/crypto/chacha20", "\x10Z\a\x93\x01*&"}, + {"vendor/golang.org/x/crypto/chacha20poly1305", "\x10Z\a\xdf\x01\x04\x01\a"}, + {"vendor/golang.org/x/crypto/cryptobyte", "g\n\x03\x8e\x01' \n"}, {"vendor/golang.org/x/crypto/cryptobyte/asn1", ""}, - {"vendor/golang.org/x/crypto/internal/alias", "\xcb\x02"}, - {"vendor/golang.org/x/crypto/internal/poly1305", "R\x15\x99\x01"}, - {"vendor/golang.org/x/net/dns/dnsmessage", "n"}, - {"vendor/golang.org/x/net/http/httpguts", "\x87\x02\x14\x1a\x14\r"}, - {"vendor/golang.org/x/net/http/httpproxy", "n\x03\x96\x01\x10\x05\x01\x18\x14\r"}, - {"vendor/golang.org/x/net/http2/hpack", "\x03k\x03\x80\x01F"}, - {"vendor/golang.org/x/net/idna", "q\x8c\x018\x14\x10\x02\x01"}, - {"vendor/golang.org/x/net/nettest", "\x03d\a\x03\x80\x01\x11\x05\x16\x01\f\n\x01\x02\x02\x01\v"}, - {"vendor/golang.org/x/sys/cpu", "\x9d\x02\r\n\x01\x16"}, - {"vendor/golang.org/x/text/secure/bidirule", "n\xdb\x01\x11\x01"}, - {"vendor/golang.org/x/text/transform", "\x03k\x83\x01X"}, - {"vendor/golang.org/x/text/unicode/bidi", "\x03\bf\x84\x01>\x16"}, - {"vendor/golang.org/x/text/unicode/norm", "g\n\x80\x01F\x12\x11"}, - {"weak", "\x94\x01\x96\x01!"}, + {"vendor/golang.org/x/crypto/internal/alias", "\xcf\x02"}, + {"vendor/golang.org/x/crypto/internal/poly1305", "U\x15\x9a\x01"}, + {"vendor/golang.org/x/net/dns/dnsmessage", "q"}, + {"vendor/golang.org/x/net/http/httpguts", "\x8b\x02\x14\x1a\x14\r"}, + {"vendor/golang.org/x/net/http/httpproxy", "q\x03\x97\x01\x10\x05\x01\x18\x14\r"}, + {"vendor/golang.org/x/net/http2/hpack", "\x03n\x03\x81\x01F"}, + {"vendor/golang.org/x/net/idna", "t\x8d\x018\x14\x10\x02\x01"}, + {"vendor/golang.org/x/net/nettest", "\x03g\a\x03\x81\x01\x11\x05\x16\x01\f\n\x01\x02\x02\x01\v"}, + {"vendor/golang.org/x/sys/cpu", "\xa1\x02\r\n\x01\x16"}, + {"vendor/golang.org/x/text/secure/bidirule", "q\xdc\x01\x11\x01"}, + {"vendor/golang.org/x/text/transform", "\x03n\x84\x01X"}, + 
{"vendor/golang.org/x/text/unicode/bidi", "\x03\bi\x85\x01>\x16"}, + {"vendor/golang.org/x/text/unicode/norm", "j\n\x81\x01F\x12\x11"}, + {"weak", "\x97\x01\x97\x01!"}, } + +// bootstrap is the list of bootstrap packages extracted from cmd/dist. +var bootstrap = map[string]bool{ + "cmp": true, + "cmd/asm": true, + "cmd/asm/internal/arch": true, + "cmd/asm/internal/asm": true, + "cmd/asm/internal/flags": true, + "cmd/asm/internal/lex": true, + "cmd/cgo": true, + "cmd/compile": true, + "cmd/compile/internal/abi": true, + "cmd/compile/internal/abt": true, + "cmd/compile/internal/amd64": true, + "cmd/compile/internal/arm": true, + "cmd/compile/internal/arm64": true, + "cmd/compile/internal/base": true, + "cmd/compile/internal/bitvec": true, + "cmd/compile/internal/compare": true, + "cmd/compile/internal/coverage": true, + "cmd/compile/internal/deadlocals": true, + "cmd/compile/internal/devirtualize": true, + "cmd/compile/internal/dwarfgen": true, + "cmd/compile/internal/escape": true, + "cmd/compile/internal/gc": true, + "cmd/compile/internal/importer": true, + "cmd/compile/internal/inline": true, + "cmd/compile/internal/inline/inlheur": true, + "cmd/compile/internal/inline/interleaved": true, + "cmd/compile/internal/ir": true, + "cmd/compile/internal/liveness": true, + "cmd/compile/internal/logopt": true, + "cmd/compile/internal/loong64": true, + "cmd/compile/internal/loopvar": true, + "cmd/compile/internal/mips": true, + "cmd/compile/internal/mips64": true, + "cmd/compile/internal/noder": true, + "cmd/compile/internal/objw": true, + "cmd/compile/internal/pgoir": true, + "cmd/compile/internal/pkginit": true, + "cmd/compile/internal/ppc64": true, + "cmd/compile/internal/rangefunc": true, + "cmd/compile/internal/reflectdata": true, + "cmd/compile/internal/riscv64": true, + "cmd/compile/internal/rttype": true, + "cmd/compile/internal/s390x": true, + "cmd/compile/internal/ssa": true, + "cmd/compile/internal/ssagen": true, + "cmd/compile/internal/staticdata": true, + "cmd/compile/internal/staticinit": true, + "cmd/compile/internal/syntax": true, + "cmd/compile/internal/test": true, + "cmd/compile/internal/typebits": true, + "cmd/compile/internal/typecheck": true, + "cmd/compile/internal/types": true, + "cmd/compile/internal/types2": true, + "cmd/compile/internal/walk": true, + "cmd/compile/internal/wasm": true, + "cmd/compile/internal/x86": true, + "cmd/internal/archive": true, + "cmd/internal/bio": true, + "cmd/internal/codesign": true, + "cmd/internal/dwarf": true, + "cmd/internal/edit": true, + "cmd/internal/gcprog": true, + "cmd/internal/goobj": true, + "cmd/internal/hash": true, + "cmd/internal/macho": true, + "cmd/internal/obj": true, + "cmd/internal/obj/arm": true, + "cmd/internal/obj/arm64": true, + "cmd/internal/obj/loong64": true, + "cmd/internal/obj/mips": true, + "cmd/internal/obj/ppc64": true, + "cmd/internal/obj/riscv": true, + "cmd/internal/obj/s390x": true, + "cmd/internal/obj/wasm": true, + "cmd/internal/obj/x86": true, + "cmd/internal/objabi": true, + "cmd/internal/par": true, + "cmd/internal/pgo": true, + "cmd/internal/pkgpath": true, + "cmd/internal/quoted": true, + "cmd/internal/src": true, + "cmd/internal/sys": true, + "cmd/internal/telemetry": true, + "cmd/internal/telemetry/counter": true, + "cmd/link": true, + "cmd/link/internal/amd64": true, + "cmd/link/internal/arm": true, + "cmd/link/internal/arm64": true, + "cmd/link/internal/benchmark": true, + "cmd/link/internal/dwtest": true, + "cmd/link/internal/ld": true, + "cmd/link/internal/loadelf": true, + 
"cmd/link/internal/loader": true, + "cmd/link/internal/loadmacho": true, + "cmd/link/internal/loadpe": true, + "cmd/link/internal/loadxcoff": true, + "cmd/link/internal/loong64": true, + "cmd/link/internal/mips": true, + "cmd/link/internal/mips64": true, + "cmd/link/internal/ppc64": true, + "cmd/link/internal/riscv64": true, + "cmd/link/internal/s390x": true, + "cmd/link/internal/sym": true, + "cmd/link/internal/wasm": true, + "cmd/link/internal/x86": true, + "compress/flate": true, + "compress/zlib": true, + "container/heap": true, + "debug/dwarf": true, + "debug/elf": true, + "debug/macho": true, + "debug/pe": true, + "go/build/constraint": true, + "go/constant": true, + "go/version": true, + "internal/abi": true, + "internal/coverage": true, + "cmd/internal/cov/covcmd": true, + "internal/bisect": true, + "internal/buildcfg": true, + "internal/exportdata": true, + "internal/goarch": true, + "internal/godebugs": true, + "internal/goexperiment": true, + "internal/goroot": true, + "internal/gover": true, + "internal/goversion": true, + "internal/lazyregexp": true, + "internal/pkgbits": true, + "internal/platform": true, + "internal/profile": true, + "internal/race": true, + "internal/runtime/gc": true, + "internal/saferio": true, + "internal/syscall/unix": true, + "internal/types/errors": true, + "internal/unsafeheader": true, + "internal/xcoff": true, + "internal/zstd": true, + "math/bits": true, + "sort": true, +} + +// BootstrapVersion is the minor version of Go used during toolchain +// bootstrapping. Packages for which [IsBootstrapPackage] must not use +// features of Go newer than this version. +const BootstrapVersion = Version(24) // go1.24.6 diff --git a/vendor/golang.org/x/tools/internal/stdlib/import.go b/vendor/golang.org/x/tools/internal/stdlib/import.go index f6909878a8..8ecc672b8b 100644 --- a/vendor/golang.org/x/tools/internal/stdlib/import.go +++ b/vendor/golang.org/x/tools/internal/stdlib/import.go @@ -87,3 +87,11 @@ func find(pkg string) (int, bool) { return strings.Compare(p.name, n) }) } + +// IsBootstrapPackage reports whether pkg is one of the low-level +// packages in the Go distribution that must compile with the older +// language version specified by [BootstrapVersion] during toolchain +// bootstrapping; see golang.org/s/go15bootstrap. 
+func IsBootstrapPackage(pkg string) bool { + return bootstrap[pkg] +} diff --git a/vendor/golang.org/x/tools/internal/stdlib/manifest.go b/vendor/golang.org/x/tools/internal/stdlib/manifest.go index c1faa50d36..362f23c436 100644 --- a/vendor/golang.org/x/tools/internal/stdlib/manifest.go +++ b/vendor/golang.org/x/tools/internal/stdlib/manifest.go @@ -225,6 +225,7 @@ var PackageSymbols = map[string][]Symbol{ {"(*Buffer).Grow", Method, 1, ""}, {"(*Buffer).Len", Method, 0, ""}, {"(*Buffer).Next", Method, 0, ""}, + {"(*Buffer).Peek", Method, 26, ""}, {"(*Buffer).Read", Method, 0, ""}, {"(*Buffer).ReadByte", Method, 0, ""}, {"(*Buffer).ReadBytes", Method, 0, ""}, @@ -1628,6 +1629,7 @@ var PackageSymbols = map[string][]Symbol{ {"ResultNoRows", Var, 0, ""}, {"Rows", Type, 0, ""}, {"RowsAffected", Type, 0, ""}, + {"RowsColumnScanner", Type, 26, ""}, {"RowsColumnTypeDatabaseTypeName", Type, 8, ""}, {"RowsColumnTypeLength", Type, 8, ""}, {"RowsColumnTypeNullable", Type, 8, ""}, @@ -4953,6 +4955,7 @@ var PackageSymbols = map[string][]Symbol{ }, "errors": { {"As", Func, 13, "func(err error, target any) bool"}, + {"AsType", Func, 26, "func[E error](err error) (E, bool)"}, {"ErrUnsupported", Var, 21, ""}, {"Is", Func, 13, "func(err error, target error) bool"}, {"Join", Func, 20, "func(errs ...error) error"}, @@ -5090,7 +5093,7 @@ var PackageSymbols = map[string][]Symbol{ {"Append", Func, 19, "func(b []byte, a ...any) []byte"}, {"Appendf", Func, 19, "func(b []byte, format string, a ...any) []byte"}, {"Appendln", Func, 19, "func(b []byte, a ...any) []byte"}, - {"Errorf", Func, 0, "func(format string, a ...any) error"}, + {"Errorf", Func, 0, "func(format string, a ...any) (err error)"}, {"FormatString", Func, 20, "func(state State, verb rune) string"}, {"Formatter", Type, 0, ""}, {"Fprint", Func, 0, "func(w io.Writer, a ...any) (n int, err error)"}, @@ -5155,6 +5158,9 @@ var PackageSymbols = map[string][]Symbol{ {"(*DeclStmt).Pos", Method, 0, ""}, {"(*DeferStmt).End", Method, 0, ""}, {"(*DeferStmt).Pos", Method, 0, ""}, + {"(*Directive).End", Method, 26, ""}, + {"(*Directive).ParseArgs", Method, 26, ""}, + {"(*Directive).Pos", Method, 26, ""}, {"(*Ellipsis).End", Method, 0, ""}, {"(*Ellipsis).Pos", Method, 0, ""}, {"(*EmptyStmt).End", Method, 0, ""}, @@ -5320,6 +5326,15 @@ var PackageSymbols = map[string][]Symbol{ {"DeferStmt", Type, 0, ""}, {"DeferStmt.Call", Field, 0, ""}, {"DeferStmt.Defer", Field, 0, ""}, + {"Directive", Type, 26, ""}, + {"Directive.Args", Field, 26, ""}, + {"Directive.ArgsPos", Field, 26, ""}, + {"Directive.Name", Field, 26, ""}, + {"Directive.Slash", Field, 26, ""}, + {"Directive.Tool", Field, 26, ""}, + {"DirectiveArg", Type, 26, ""}, + {"DirectiveArg.Arg", Field, 26, ""}, + {"DirectiveArg.Pos", Field, 26, ""}, {"Ellipsis", Type, 0, ""}, {"Ellipsis.Ellipsis", Field, 0, ""}, {"Ellipsis.Elt", Field, 0, ""}, @@ -5469,6 +5484,7 @@ var PackageSymbols = map[string][]Symbol{ {"ParenExpr.Lparen", Field, 0, ""}, {"ParenExpr.Rparen", Field, 0, ""}, {"ParenExpr.X", Field, 0, ""}, + {"ParseDirective", Func, 26, "func(pos token.Pos, c string) (Directive, bool)"}, {"Pkg", Const, 0, ""}, {"Preorder", Func, 23, "func(root Node) iter.Seq[Node]"}, {"PreorderStack", Func, 25, "func(root Node, stack []Node, f func(n Node, stack []Node) bool)"}, @@ -7271,6 +7287,10 @@ var PackageSymbols = map[string][]Symbol{ {"(*Logger).WarnContext", Method, 21, ""}, {"(*Logger).With", Method, 21, ""}, {"(*Logger).WithGroup", Method, 21, ""}, + {"(*MultiHandler).Enabled", Method, 26, ""}, + {"(*MultiHandler).Handle", 
Method, 26, ""}, + {"(*MultiHandler).WithAttrs", Method, 26, ""}, + {"(*MultiHandler).WithGroup", Method, 26, ""}, {"(*Record).Add", Method, 21, ""}, {"(*Record).AddAttrs", Method, 21, ""}, {"(*TextHandler).Enabled", Method, 21, ""}, @@ -7358,9 +7378,11 @@ var PackageSymbols = map[string][]Symbol{ {"LogValuer", Type, 21, ""}, {"Logger", Type, 21, ""}, {"MessageKey", Const, 21, ""}, + {"MultiHandler", Type, 26, ""}, {"New", Func, 21, "func(h Handler) *Logger"}, {"NewJSONHandler", Func, 21, "func(w io.Writer, opts *HandlerOptions) *JSONHandler"}, {"NewLogLogger", Func, 21, "func(h Handler, level Level) *log.Logger"}, + {"NewMultiHandler", Func, 26, "func(handlers ...Handler) *MultiHandler"}, {"NewRecord", Func, 21, "func(t time.Time, level Level, msg string, pc uintptr) Record"}, {"NewTextHandler", Func, 21, "func(w io.Writer, opts *HandlerOptions) *TextHandler"}, {"Record", Type, 21, ""}, @@ -7515,7 +7537,7 @@ var PackageSymbols = map[string][]Symbol{ {"MinInt64", Const, 0, ""}, {"MinInt8", Const, 0, ""}, {"Mod", Func, 0, "func(x float64, y float64) float64"}, - {"Modf", Func, 0, "func(f float64) (int float64, frac float64)"}, + {"Modf", Func, 0, "func(f float64) (integer float64, fractional float64)"}, {"NaN", Func, 0, "func() float64"}, {"Nextafter", Func, 0, "func(x float64, y float64) (r float64)"}, {"Nextafter32", Func, 4, "func(x float32, y float32) (r float32)"}, @@ -7972,6 +7994,10 @@ var PackageSymbols = map[string][]Symbol{ {"(*DNSError).Unwrap", Method, 23, ""}, {"(*Dialer).Dial", Method, 1, ""}, {"(*Dialer).DialContext", Method, 7, ""}, + {"(*Dialer).DialIP", Method, 26, ""}, + {"(*Dialer).DialTCP", Method, 26, ""}, + {"(*Dialer).DialUDP", Method, 26, ""}, + {"(*Dialer).DialUnix", Method, 26, ""}, {"(*Dialer).MultipathTCP", Method, 21, ""}, {"(*Dialer).SetMultipathTCP", Method, 21, ""}, {"(*IP).UnmarshalText", Method, 2, ""}, @@ -8457,6 +8483,7 @@ var PackageSymbols = map[string][]Symbol{ {"HTTP2Config.PermitProhibitedCipherSuites", Field, 24, ""}, {"HTTP2Config.PingTimeout", Field, 24, ""}, {"HTTP2Config.SendPingTimeout", Field, 24, ""}, + {"HTTP2Config.StrictMaxConcurrentRequests", Field, 26, ""}, {"HTTP2Config.WriteByteTimeout", Field, 24, ""}, {"Handle", Func, 0, "func(pattern string, handler Handler)"}, {"HandleFunc", Func, 0, "func(pattern string, handler func(ResponseWriter, *Request))"}, @@ -8904,6 +8931,7 @@ var PackageSymbols = map[string][]Symbol{ {"(Prefix).AppendText", Method, 24, ""}, {"(Prefix).AppendTo", Method, 18, ""}, {"(Prefix).Bits", Method, 18, ""}, + {"(Prefix).Compare", Method, 26, ""}, {"(Prefix).Contains", Method, 18, ""}, {"(Prefix).IsSingleIP", Method, 18, ""}, {"(Prefix).IsValid", Method, 18, ""}, @@ -9177,6 +9205,7 @@ var PackageSymbols = map[string][]Symbol{ {"(*Process).Release", Method, 0, ""}, {"(*Process).Signal", Method, 0, ""}, {"(*Process).Wait", Method, 0, ""}, + {"(*Process).WithHandle", Method, 26, ""}, {"(*ProcessState).ExitCode", Method, 12, ""}, {"(*ProcessState).Exited", Method, 0, ""}, {"(*ProcessState).Pid", Method, 0, ""}, @@ -9234,6 +9263,7 @@ var PackageSymbols = map[string][]Symbol{ {"ErrExist", Var, 0, ""}, {"ErrInvalid", Var, 0, ""}, {"ErrNoDeadline", Var, 10, ""}, + {"ErrNoHandle", Var, 26, ""}, {"ErrNotExist", Var, 0, ""}, {"ErrPermission", Var, 0, ""}, {"ErrProcessDone", Var, 16, ""}, @@ -9461,7 +9491,7 @@ var PackageSymbols = map[string][]Symbol{ {"ListSeparator", Const, 0, ""}, {"Localize", Func, 23, "func(path string) (string, error)"}, {"Match", Func, 0, "func(pattern string, name string) (matched bool, err error)"}, - 
{"Rel", Func, 0, "func(basepath string, targpath string) (string, error)"}, + {"Rel", Func, 0, "func(basePath string, targPath string) (string, error)"}, {"Separator", Const, 0, ""}, {"SkipAll", Var, 20, ""}, {"SkipDir", Var, 0, ""}, @@ -9932,7 +9962,7 @@ var PackageSymbols = map[string][]Symbol{ {"PanicNilError", Type, 21, ""}, {"Pinner", Type, 21, ""}, {"ReadMemStats", Func, 0, "func(m *MemStats)"}, - {"ReadTrace", Func, 5, "func() []byte"}, + {"ReadTrace", Func, 5, "func() (buf []byte)"}, {"SetBlockProfileRate", Func, 1, "func(rate int)"}, {"SetCPUProfileRate", Func, 0, "func(hz int)"}, {"SetCgoTraceback", Func, 7, "func(version int, traceback unsafe.Pointer, context unsafe.Pointer, symbolizer unsafe.Pointer)"}, @@ -16679,6 +16709,7 @@ var PackageSymbols = map[string][]Symbol{ {"ValueOf", Func, 0, ""}, }, "testing": { + {"(*B).ArtifactDir", Method, 26, ""}, {"(*B).Attr", Method, 25, ""}, {"(*B).Chdir", Method, 24, ""}, {"(*B).Cleanup", Method, 14, ""}, @@ -16713,6 +16744,7 @@ var PackageSymbols = map[string][]Symbol{ {"(*B).StopTimer", Method, 0, ""}, {"(*B).TempDir", Method, 15, ""}, {"(*F).Add", Method, 18, ""}, + {"(*F).ArtifactDir", Method, 26, ""}, {"(*F).Attr", Method, 25, ""}, {"(*F).Chdir", Method, 24, ""}, {"(*F).Cleanup", Method, 18, ""}, @@ -16738,6 +16770,7 @@ var PackageSymbols = map[string][]Symbol{ {"(*F).TempDir", Method, 18, ""}, {"(*M).Run", Method, 4, ""}, {"(*PB).Next", Method, 3, ""}, + {"(*T).ArtifactDir", Method, 26, ""}, {"(*T).Attr", Method, 25, ""}, {"(*T).Chdir", Method, 24, ""}, {"(*T).Cleanup", Method, 14, ""}, diff --git a/vendor/golang.org/x/tools/internal/typeparams/normalize.go b/vendor/golang.org/x/tools/internal/typeparams/normalize.go index f49802b8ef..8d13f12147 100644 --- a/vendor/golang.org/x/tools/internal/typeparams/normalize.go +++ b/vendor/golang.org/x/tools/internal/typeparams/normalize.go @@ -160,8 +160,7 @@ func computeTermSetInternal(t types.Type, seen map[types.Type]*termSet, depth in // The term set of an interface is the intersection of the term sets of its // embedded types. tset.terms = allTermlist - for i := 0; i < u.NumEmbeddeds(); i++ { - embedded := u.EmbeddedType(i) + for embedded := range u.EmbeddedTypes() { if _, ok := embedded.Underlying().(*types.TypeParam); ok { return nil, fmt.Errorf("invalid embedded type %T", embedded) } @@ -174,8 +173,7 @@ func computeTermSetInternal(t types.Type, seen map[types.Type]*termSet, depth in case *types.Union: // The term set of a union is the union of term sets of its terms. tset.terms = nil - for i := 0; i < u.Len(); i++ { - t := u.Term(i) + for t := range u.Terms() { var terms termlist switch t.Type().Underlying().(type) { case *types.Interface: diff --git a/vendor/golang.org/x/tools/internal/typesinternal/element.go b/vendor/golang.org/x/tools/internal/typesinternal/element.go index 4957f02164..5fe4d8abcb 100644 --- a/vendor/golang.org/x/tools/internal/typesinternal/element.go +++ b/vendor/golang.org/x/tools/internal/typesinternal/element.go @@ -35,8 +35,8 @@ func ForEachElement(rtypes *typeutil.Map, msets *typeutil.MethodSetCache, T type // Recursion over signatures of each method. 
tmset := msets.MethodSet(T) - for i := 0; i < tmset.Len(); i++ { - sig := tmset.At(i).Type().(*types.Signature) + for method := range tmset.Methods() { + sig := method.Type().(*types.Signature) // It is tempting to call visit(sig, false) // but, as noted in golang.org/cl/65450043, // the Signature.Recv field is ignored by diff --git a/vendor/golang.org/x/tools/internal/typesinternal/fx.go b/vendor/golang.org/x/tools/internal/typesinternal/fx.go index 93acff2170..c846a53d5f 100644 --- a/vendor/golang.org/x/tools/internal/typesinternal/fx.go +++ b/vendor/golang.org/x/tools/internal/typesinternal/fx.go @@ -19,25 +19,46 @@ func NoEffects(info *types.Info, expr ast.Expr) bool { switch v := n.(type) { case nil, *ast.Ident, *ast.BasicLit, *ast.BinaryExpr, *ast.ParenExpr, *ast.SelectorExpr, *ast.IndexExpr, *ast.SliceExpr, *ast.TypeAssertExpr, - *ast.StarExpr, *ast.CompositeLit, *ast.ArrayType, *ast.StructType, - *ast.MapType, *ast.InterfaceType, *ast.KeyValueExpr: - // No effect + *ast.StarExpr, *ast.CompositeLit, + // non-expressions that may appear within expressions + *ast.KeyValueExpr, + *ast.FieldList, + *ast.Field, + *ast.Ellipsis, + *ast.IndexListExpr: + // No effect. + + case *ast.ArrayType, + *ast.StructType, + *ast.ChanType, + *ast.FuncType, + *ast.MapType, + *ast.InterfaceType: + // Type syntax: no effects, recursively. + // Prune descent. + return false + case *ast.UnaryExpr: - // Channel send <-ch has effects + // Channel send <-ch has effects. if v.Op == token.ARROW { noEffects = false } + case *ast.CallExpr: - // Type conversion has no effects + // Type conversion has no effects. if !info.Types[v.Fun].IsType() { - // TODO(adonovan): Add a case for built-in functions without side - // effects (by using callsPureBuiltin from tools/internal/refactor/inline) - - noEffects = false + if CallsPureBuiltin(info, v) { + // A call such as len(e) has no effects of its + // own, though the subexpression e might. + } else { + noEffects = false + } } + case *ast.FuncLit: // A FuncLit has no effects, but do not descend into it. return false + default: // All other expressions have effects noEffects = false @@ -47,3 +68,21 @@ func NoEffects(info *types.Info, expr ast.Expr) bool { }) return noEffects } + +// CallsPureBuiltin reports whether call is a call of a built-in +// function that is a pure computation over its operands (analogous to +// a + operator). Because it does not depend on program state, it may +// be evaluated at any point--though not necessarily at multiple +// points (consider new, make). 
+func CallsPureBuiltin(info *types.Info, call *ast.CallExpr) bool { + if id, ok := ast.Unparen(call.Fun).(*ast.Ident); ok { + if b, ok := info.ObjectOf(id).(*types.Builtin); ok { + switch b.Name() { + case "len", "cap", "complex", "imag", "real", "make", "new", "max", "min": + return true + } + // Not: append clear close copy delete panic print println recover + } + } + return false +} diff --git a/vendor/golang.org/x/tools/internal/typesinternal/isnamed.go b/vendor/golang.org/x/tools/internal/typesinternal/isnamed.go index f2affec4fb..e0d63c46c6 100644 --- a/vendor/golang.org/x/tools/internal/typesinternal/isnamed.go +++ b/vendor/golang.org/x/tools/internal/typesinternal/isnamed.go @@ -48,7 +48,7 @@ func IsFunctionNamed(obj types.Object, pkgPath string, names ...string) bool { return ok && IsPackageLevel(obj) && f.Pkg().Path() == pkgPath && - f.Type().(*types.Signature).Recv() == nil && + f.Signature().Recv() == nil && slices.Contains(names, f.Name()) } @@ -60,7 +60,7 @@ func IsFunctionNamed(obj types.Object, pkgPath string, names ...string) bool { // which is important for the performance of syntax matching. func IsMethodNamed(obj types.Object, pkgPath string, typeName string, names ...string) bool { if fn, ok := obj.(*types.Func); ok { - if recv := fn.Type().(*types.Signature).Recv(); recv != nil { + if recv := fn.Signature().Recv(); recv != nil { _, T := ReceiverNamed(recv) return T != nil && IsTypeNamed(T, pkgPath, typeName) && diff --git a/vendor/golang.org/x/tools/internal/typesinternal/qualifier.go b/vendor/golang.org/x/tools/internal/typesinternal/qualifier.go index 64f47919f0..4e2756fc49 100644 --- a/vendor/golang.org/x/tools/internal/typesinternal/qualifier.go +++ b/vendor/golang.org/x/tools/internal/typesinternal/qualifier.go @@ -19,7 +19,7 @@ import ( // TODO(adonovan): this function ignores the effect of shadowing. It // should accept a [token.Pos] and a [types.Info] and compute only the // set of imports that are not shadowed at that point, analogous to -// [analysisinternal.AddImport]. It could also compute (as a side +// [analysis.AddImport]. It could also compute (as a side // effect) the set of additional imports required to ensure that there // is an accessible import for each necessary package, making it // converge even more closely with AddImport. diff --git a/vendor/golang.org/x/tools/internal/typesinternal/varkind.go b/vendor/golang.org/x/tools/internal/typesinternal/varkind.go index e5da049511..26499cdd2e 100644 --- a/vendor/golang.org/x/tools/internal/typesinternal/varkind.go +++ b/vendor/golang.org/x/tools/internal/typesinternal/varkind.go @@ -2,39 +2,22 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -package typesinternal +//go:build go1.25 -// TODO(adonovan): when CL 645115 lands, define the go1.25 version of -// this API that actually does something. 
+package typesinternal import "go/types" -type VarKind uint8 +type VarKind = types.VarKind const ( - _ VarKind = iota // (not meaningful) - PackageVar // a package-level variable - LocalVar // a local variable - RecvVar // a method receiver variable - ParamVar // a function parameter variable - ResultVar // a function result variable - FieldVar // a struct field + PackageVar = types.PackageVar + LocalVar = types.LocalVar + RecvVar = types.RecvVar + ParamVar = types.ParamVar + ResultVar = types.ResultVar + FieldVar = types.FieldVar ) -func (kind VarKind) String() string { - return [...]string{ - 0: "VarKind(0)", - PackageVar: "PackageVar", - LocalVar: "LocalVar", - RecvVar: "RecvVar", - ParamVar: "ParamVar", - ResultVar: "ResultVar", - FieldVar: "FieldVar", - }[kind] -} - -// GetVarKind returns an invalid VarKind. -func GetVarKind(v *types.Var) VarKind { return 0 } - -// SetVarKind has no effect. -func SetVarKind(v *types.Var, kind VarKind) {} +func GetVarKind(v *types.Var) VarKind { return v.Kind() } +func SetVarKind(v *types.Var, kind VarKind) { v.SetKind(kind) } diff --git a/vendor/golang.org/x/tools/internal/typesinternal/varkind_go124.go b/vendor/golang.org/x/tools/internal/typesinternal/varkind_go124.go new file mode 100644 index 0000000000..17b1804b4e --- /dev/null +++ b/vendor/golang.org/x/tools/internal/typesinternal/varkind_go124.go @@ -0,0 +1,39 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !go1.25 + +package typesinternal + +import "go/types" + +type VarKind uint8 + +const ( + _ VarKind = iota // (not meaningful) + PackageVar // a package-level variable + LocalVar // a local variable + RecvVar // a method receiver variable + ParamVar // a function parameter variable + ResultVar // a function result variable + FieldVar // a struct field +) + +func (kind VarKind) String() string { + return [...]string{ + 0: "VarKind(0)", + PackageVar: "PackageVar", + LocalVar: "LocalVar", + RecvVar: "RecvVar", + ParamVar: "ParamVar", + ResultVar: "ResultVar", + FieldVar: "FieldVar", + }[kind] +} + +// GetVarKind returns an invalid VarKind. +func GetVarKind(v *types.Var) VarKind { return 0 } + +// SetVarKind has no effect. 
+func SetVarKind(v *types.Var, kind VarKind) {} diff --git a/vendor/golang.org/x/tools/internal/typesinternal/zerovalue.go b/vendor/golang.org/x/tools/internal/typesinternal/zerovalue.go index 453bba2ad5..d612a71029 100644 --- a/vendor/golang.org/x/tools/internal/typesinternal/zerovalue.go +++ b/vendor/golang.org/x/tools/internal/typesinternal/zerovalue.go @@ -258,12 +258,12 @@ func TypeExpr(t types.Type, qual types.Qualifier) ast.Expr { case *types.Signature: var params []*ast.Field - for i := 0; i < t.Params().Len(); i++ { + for v := range t.Params().Variables() { params = append(params, &ast.Field{ - Type: TypeExpr(t.Params().At(i).Type(), qual), + Type: TypeExpr(v.Type(), qual), Names: []*ast.Ident{ { - Name: t.Params().At(i).Name(), + Name: v.Name(), }, }, }) @@ -273,9 +273,9 @@ func TypeExpr(t types.Type, qual types.Qualifier) ast.Expr { last.Type = &ast.Ellipsis{Elt: last.Type.(*ast.ArrayType).Elt} } var returns []*ast.Field - for i := 0; i < t.Results().Len(); i++ { + for v := range t.Results().Variables() { returns = append(returns, &ast.Field{ - Type: TypeExpr(t.Results().At(i).Type(), qual), + Type: TypeExpr(v.Type(), qual), }) } return &ast.FuncType{ @@ -315,8 +315,8 @@ func TypeExpr(t types.Type, qual types.Qualifier) ast.Expr { if hasTypeArgs, ok := t.(interface{ TypeArgs() *types.TypeList }); ok { if typeArgs := hasTypeArgs.TypeArgs(); typeArgs != nil && typeArgs.Len() > 0 { var indices []ast.Expr - for i := range typeArgs.Len() { - indices = append(indices, TypeExpr(typeArgs.At(i), qual)) + for t0 := range typeArgs.Types() { + indices = append(indices, TypeExpr(t0, qual)) } expr = &ast.IndexListExpr{ X: expr, diff --git a/vendor/golang.org/x/tools/internal/versions/features.go b/vendor/golang.org/x/tools/internal/versions/features.go index b53f178616..a5f4e3252c 100644 --- a/vendor/golang.org/x/tools/internal/versions/features.go +++ b/vendor/golang.org/x/tools/internal/versions/features.go @@ -7,13 +7,17 @@ package versions // This file contains predicates for working with file versions to // decide when a tool should consider a language feature enabled. -// GoVersions that features in x/tools can be gated to. +// named constants, to avoid misspelling const ( Go1_18 = "go1.18" Go1_19 = "go1.19" Go1_20 = "go1.20" Go1_21 = "go1.21" Go1_22 = "go1.22" + Go1_23 = "go1.23" + Go1_24 = "go1.24" + Go1_25 = "go1.25" + Go1_26 = "go1.26" ) // Future is an invalid unknown Go version sometime in the future. 
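The normalize.go, element.go, and zerovalue.go hunks above all make the same mechanical change: index-based Len/At loops are replaced by the range-over-func iterator methods that go/types gained in Go 1.24 (EmbeddedTypes, Terms, Methods, Variables, Types), which is consistent with this module's go 1.24.0 requirement. A minimal, self-contained sketch of that style follows; it assumes a Go 1.24+ toolchain, and the toy package p and type T are illustrative only, not taken from this diff.

package main

import (
	"fmt"
	"go/ast"
	"go/importer"
	"go/parser"
	"go/token"
	"go/types"
)

// A hypothetical toy package whose method set we iterate below.
const src = `package p
type T struct{}
func (T) Foo()          {}
func (T) Bar(x int) int { return x }
`

func main() {
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "p.go", src, 0)
	if err != nil {
		panic(err)
	}
	pkg, err := (&types.Config{Importer: importer.Default()}).Check("p", fset, []*ast.File{f}, nil)
	if err != nil {
		panic(err)
	}
	T := pkg.Scope().Lookup("T").Type()

	// Go 1.24 iterator style, as adopted by the hunks above:
	// MethodSet.Methods replaces the Len/At index loop ...
	for sel := range types.NewMethodSet(T).Methods() {
		sig := sel.Type().(*types.Signature)
		// ... and Tuple.Variables iterates parameters directly.
		for v := range sig.Params().Variables() {
			fmt.Printf("%s: param %s %s\n", sel.Obj().Name(), v.Name(), v.Type())
		}
	}
}

The old form (for i := 0; i < tmset.Len(); i++ { ... tmset.At(i) ... }) remains valid; the iterator form is behavior-preserving and only removes the index bookkeeping.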
diff --git a/vendor/modules.txt b/vendor/modules.txt index 25fe595288..bdeed3b33b 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -54,7 +54,7 @@ github.com/Djarvur/go-err113 # github.com/Masterminds/semver/v3 v3.4.0 ## explicit; go 1.21 github.com/Masterminds/semver/v3 -# github.com/MirrexOne/unqueryvet v1.2.1 +# github.com/MirrexOne/unqueryvet v1.3.0 ## explicit; go 1.24.0 github.com/MirrexOne/unqueryvet github.com/MirrexOne/unqueryvet/internal/analyzer @@ -160,7 +160,7 @@ github.com/butuzov/ireturn/analyzer/internal/types ## explicit; go 1.19 github.com/butuzov/mirror github.com/butuzov/mirror/internal/checker -# github.com/catenacyber/perfsprint v0.10.0 +# github.com/catenacyber/perfsprint v0.10.1 ## explicit; go 1.24.0 github.com/catenacyber/perfsprint/analyzer # github.com/ccojocar/zxcvbn-go v1.0.4 @@ -323,7 +323,7 @@ github.com/gobwas/glob/syntax/ast github.com/gobwas/glob/syntax/lexer github.com/gobwas/glob/util/runes github.com/gobwas/glob/util/strings -# github.com/godoc-lint/godoc-lint v0.10.1 +# github.com/godoc-lint/godoc-lint v0.10.2 ## explicit; go 1.24 github.com/godoc-lint/godoc-lint/pkg/analysis github.com/godoc-lint/godoc-lint/pkg/check @@ -367,7 +367,7 @@ github.com/golangci/go-printf-func-name/pkg/analyzer # github.com/golangci/gofmt v0.0.0-20250106114630-d62b90e6713d ## explicit; go 1.22.0 github.com/golangci/gofmt/gofmt -# github.com/golangci/golangci-lint/v2 v2.6.2 +# github.com/golangci/golangci-lint/v2 v2.7.2 ## explicit; go 1.24.0 github.com/golangci/golangci-lint/v2/cmd/golangci-lint github.com/golangci/golangci-lint/v2/internal/cache @@ -625,7 +625,7 @@ github.com/hashicorp/go-retryablehttp # github.com/hashicorp/go-uuid v1.0.3 ## explicit github.com/hashicorp/go-uuid -# github.com/hashicorp/go-version v1.7.0 +# github.com/hashicorp/go-version v1.8.0 ## explicit github.com/hashicorp/go-version # github.com/hashicorp/golang-lru/v2 v2.0.7 @@ -835,8 +835,8 @@ github.com/mattn/go-runewidth # github.com/matttproud/golang_protobuf_extensions v1.0.1 ## explicit github.com/matttproud/golang_protobuf_extensions/pbutil -# github.com/mgechev/revive v1.12.0 -## explicit; go 1.23.0 +# github.com/mgechev/revive v1.13.0 +## explicit; go 1.24.0 github.com/mgechev/revive/config github.com/mgechev/revive/formatter github.com/mgechev/revive/internal/astutils @@ -1004,7 +1004,7 @@ github.com/sashamelentyev/interfacebloat/pkg/analyzer ## explicit; go 1.23.0 github.com/sashamelentyev/usestdlibvars/pkg/analyzer github.com/sashamelentyev/usestdlibvars/pkg/analyzer/internal/mapping -# github.com/securego/gosec/v2 v2.22.10 +# github.com/securego/gosec/v2 v2.22.11-0.20251204091113-daccba6b93d7 ## explicit; go 1.24.0 github.com/securego/gosec/v2 github.com/securego/gosec/v2/analyzers @@ -1031,7 +1031,7 @@ github.com/sonatard/noctx # github.com/sourcegraph/go-diff v0.7.0 ## explicit; go 1.14 github.com/sourcegraph/go-diff/diff -# github.com/spf13/afero v1.14.0 +# github.com/spf13/afero v1.15.0 ## explicit; go 1.23.0 github.com/spf13/afero github.com/spf13/afero/internal/common @@ -1039,7 +1039,7 @@ github.com/spf13/afero/mem # github.com/spf13/cast v1.5.0 ## explicit; go 1.18 github.com/spf13/cast -# github.com/spf13/cobra v1.10.1 +# github.com/spf13/cobra v1.10.2 ## explicit; go 1.15 github.com/spf13/cobra # github.com/spf13/jwalterweatherman v1.1.0 @@ -1062,8 +1062,8 @@ github.com/spf13/viper/internal/encoding/yaml # github.com/ssgreg/nlreturn/v2 v2.2.1 ## explicit; go 1.13 github.com/ssgreg/nlreturn/v2/pkg/nlreturn -# github.com/stbenjam/no-sprintf-host-port v0.2.0 -## 
explicit; go 1.18 +# github.com/stbenjam/no-sprintf-host-port v0.3.1 +## explicit; go 1.24.0 github.com/stbenjam/no-sprintf-host-port/pkg/analyzer # github.com/stretchr/objx v0.5.2 ## explicit; go 1.20 @@ -1090,8 +1090,8 @@ github.com/timonwong/loggercheck/internal/checkers/printf github.com/timonwong/loggercheck/internal/rules github.com/timonwong/loggercheck/internal/sets github.com/timonwong/loggercheck/internal/stringutil -# github.com/tomarrell/wrapcheck/v2 v2.11.0 -## explicit; go 1.22.0 +# github.com/tomarrell/wrapcheck/v2 v2.12.0 +## explicit; go 1.24.0 github.com/tomarrell/wrapcheck/v2/wrapcheck # github.com/tommy-muehle/go-mnd/v2 v2.5.1 ## explicit; go 1.12 @@ -1189,6 +1189,9 @@ go.uber.org/zap/internal/exit go.uber.org/zap/internal/pool go.uber.org/zap/internal/stacktrace go.uber.org/zap/zapcore +# go.yaml.in/yaml/v3 v3.0.4 +## explicit; go 1.16 +go.yaml.in/yaml/v3 # golang.org/x/crypto v0.45.0 ## explicit; go 1.24.0 golang.org/x/crypto/argon2 @@ -1212,7 +1215,7 @@ golang.org/x/crypto/ssh/internal/bcrypt_pbkdf # golang.org/x/exp/typeparams v0.0.0-20251023183803-a4bb9ffd2546 ## explicit; go 1.24.0 golang.org/x/exp/typeparams -# golang.org/x/mod v0.29.0 +# golang.org/x/mod v0.30.0 ## explicit; go 1.24.0 golang.org/x/mod/internal/lazyregexp golang.org/x/mod/modfile @@ -1262,7 +1265,7 @@ golang.org/x/text/transform golang.org/x/text/unicode/bidi golang.org/x/text/unicode/norm golang.org/x/text/width -# golang.org/x/tools v0.38.0 +# golang.org/x/tools v0.39.0 ## explicit; go 1.24.0 golang.org/x/tools/cmd/stringer golang.org/x/tools/go/analysis @@ -1290,7 +1293,7 @@ golang.org/x/tools/go/analysis/passes/httpmux golang.org/x/tools/go/analysis/passes/httpresponse golang.org/x/tools/go/analysis/passes/ifaceassert golang.org/x/tools/go/analysis/passes/inspect -golang.org/x/tools/go/analysis/passes/internal/analysisutil +golang.org/x/tools/go/analysis/passes/internal/ctrlflowinternal golang.org/x/tools/go/analysis/passes/loopclosure golang.org/x/tools/go/analysis/passes/lostcancel golang.org/x/tools/go/analysis/passes/modernize @@ -1332,10 +1335,10 @@ golang.org/x/tools/go/types/objectpath golang.org/x/tools/go/types/typeutil golang.org/x/tools/imports golang.org/x/tools/internal/aliases -golang.org/x/tools/internal/analysisinternal -golang.org/x/tools/internal/analysisinternal/generated -golang.org/x/tools/internal/analysisinternal/typeindex +golang.org/x/tools/internal/analysis/analyzerutil +golang.org/x/tools/internal/analysis/typeindex golang.org/x/tools/internal/astutil +golang.org/x/tools/internal/cfginternal golang.org/x/tools/internal/event golang.org/x/tools/internal/event/core golang.org/x/tools/internal/event/keys @@ -1348,9 +1351,11 @@ golang.org/x/tools/internal/goplsexport golang.org/x/tools/internal/imports golang.org/x/tools/internal/modindex golang.org/x/tools/internal/moreiters +golang.org/x/tools/internal/packagepath golang.org/x/tools/internal/packagesinternal golang.org/x/tools/internal/pkgbits golang.org/x/tools/internal/refactor +golang.org/x/tools/internal/ssainternal golang.org/x/tools/internal/stdlib golang.org/x/tools/internal/typeparams golang.org/x/tools/internal/typesinternal
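One structural change worth noting: the varkind.go rewrite above splits its API across build constraints. A //go:build go1.25 file aliases types.VarKind (added to go/types in Go 1.25) and delegates to the real (*types.Var).Kind, while the new varkind_go124.go keeps the old no-op stub under !go1.25, so callers compile against one API on every supported toolchain. Below is a hedged two-file sketch of that shim pattern; the package name shim and the identifiers Kind and Get are hypothetical, not taken from this diff.

// file kind_go125.go (hypothetical)
//go:build go1.25

package shim

import "go/types"

// On Go 1.25+, Kind is simply an alias for the real go/types API.
type Kind = types.VarKind

// Get delegates to the toolchain's implementation.
func Get(v *types.Var) Kind { return v.Kind() }

// file kind_go124.go (hypothetical)
//go:build !go1.25

package shim

import "go/types"

// On older toolchains, Kind is a local stand-in with the same shape.
type Kind uint8

// Get cannot recover the kind before Go 1.25, so it reports zero,
// mirroring the "returns an invalid VarKind" stub in varkind_go124.go above.
func Get(v *types.Var) Kind { return 0 }

Because both files declare the same package-level names under mutually exclusive build constraints, exactly one definition is compiled into any given build, which is what lets the vendored code drop its TODO and its no-op GetVarKind/SetVarKind on new toolchains without breaking older ones.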