diff --git a/cmd/skywire-cli/commands/config/gen.go b/cmd/skywire-cli/commands/config/gen.go
index 105f96aa79..17380843e5 100644
--- a/cmd/skywire-cli/commands/config/gen.go
+++ b/cmd/skywire-cli/commands/config/gen.go
@@ -6,7 +6,6 @@ import (
"fmt"
"os"
"os/exec"
- "os/user"
"path/filepath"
"strings"
@@ -187,14 +186,6 @@ var genConfigCmd = &cobra.Command{
//don't write file with stdout
if !isStdout {
if skyenv.OS == "linux" {
- userLvl, err := user.Current()
- if err != nil {
- logger.WithError(err).Error("Failed to detect user.")
- } else {
- if userLvl.Username == "root" {
- isRoot = true
- }
- }
//warn when writing config as root to non root owned dir & fail on the reverse instance
if _, err = exec.LookPath("stat"); err == nil {
confPath1, _ := filepath.Split(confPath)
diff --git a/cmd/skywire-cli/commands/config/private.go b/cmd/skywire-cli/commands/config/private.go
new file mode 100644
index 0000000000..5f6546525c
--- /dev/null
+++ b/cmd/skywire-cli/commands/config/private.go
@@ -0,0 +1,123 @@
+package cliconfig
+
+import (
+ "encoding/json"
+ "fmt"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "github.com/sirupsen/logrus"
+ coincipher "github.com/skycoin/skycoin/src/cipher"
+ "github.com/spf13/cobra"
+
+ "github.com/skycoin/skywire-utilities/pkg/logging"
+ "github.com/skycoin/skywire/pkg/skyenv"
+)
+
+var (
+ displayNodeIP bool
+ rewardAddress string
+ out string
+ pathstr string
+ fullpathstr string
+ getpathstr string
+ dummy string
+)
+
+func init() {
+
+ privacyConfigCmd.Flags().SortFlags = false
+ RootCmd.AddCommand(privacyConfigCmd)
+ privacyConfigCmd.AddCommand(setPrivacyConfigCmd)
+ privacyConfigCmd.AddCommand(getPrivacyConfigCmd)
+ setPrivacyConfigCmd.Flags().BoolVarP(&displayNodeIP, "publicip", "i", false, "display node ip")
+ // default is the genesis address for the skycoin blockchain; for testing
+ setPrivacyConfigCmd.Flags().StringVarP(&rewardAddress, "address", "a", "2jBbGxZRGoQG1mqhPBnXnLTxK6oxsTf8os6", "reward address")
+ //use the correct path for the available permissions
+ pathstr = skyenv.PackageConfig().LocalPath
+ fullpathstr = strings.Join([]string{pathstr, skyenv.PrivFile}, "/")
+ getpathstr = fullpathstr
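+ // leave the get-path empty when the privacy config file does not exist yet,
+ // so the flag help text and the default read path reflect that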
+ if _, err := os.Stat(getpathstr); os.IsNotExist(err) {
+ getpathstr = ""
+ }
+ setPrivacyConfigCmd.Flags().StringVarP(&out, "out", "o", "", "output config: "+fullpathstr)
+ getPrivacyConfigCmd.Flags().StringVarP(&out, "out", "o", "", "read config from: "+getpathstr)
+ RootCmd.PersistentFlags().StringVar(&dummy, "rpc", "localhost:3435", "RPC server address")
+ RootCmd.PersistentFlags().MarkHidden("rpc") // nolint
+
+}
+
+var privacyConfigCmd = &cobra.Command{
+ SilenceErrors: true,
+ SilenceUsage: true,
+ Use: "priv",
+ Short: "rewards & privacy setting",
+ Long: `rewards & privacy setting
+
+Sets the skycoin reward address and public ip for the visor.
+The config is written to the root of the default local directory.
+Run this command with root permissions for visors running as root via systemd.
+This config is served via dmsghttp along with transport logs
+and the system hardware survey for automating rewards distribution`,
+}
+
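+// Illustrative usage, based on the flags defined above:
+//   skywire-cli config priv set -a <reward-address> -i
+//   skywire-cli config priv get
+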
+var setPrivacyConfigCmd = &cobra.Command{
+ Use: "set
",
+ Short: "set reward address & node privacy",
+ Long: "set reward address & node privacy",
+ Run: func(cmd *cobra.Command, args []string) {
+ mLog := logging.NewMasterLogger()
+ mLog.SetLevel(logrus.InfoLevel)
+ if out == "" {
+ out = fullpathstr
+ }
+ if len(args) > 0 {
+ if args[0] != "" {
+ rewardAddress = args[0]
+ }
+ }
+ _, err := coincipher.DecodeBase58Address(rewardAddress)
+ if err != nil {
+ logger.WithError(err).Fatal("invalid address specified")
+ }
+
+ confp := &skyenv.Privacy{}
+ confp.DisplayNodeIP = displayNodeIP
+ confp.RewardAddress = rewardAddress
+
+ // Marshal the privacy config to json.
+ j, err := json.MarshalIndent(confp, "", "\t")
+ if err != nil {
+ logger.WithError(err).Fatal("Could not marshal json.")
+ }
+ if _, err := os.Stat(pathstr); os.IsNotExist(err) {
+ logger.WithError(err).Fatal("local directory not found; run skywire first to create this path")
+ }
+ err = os.WriteFile(out, j, 0644) //nolint
+ if err != nil {
+ logger.WithError(err).Fatal("Failed to write config to file.")
+ }
+ logger.Infof("Updated file '%s' to:\n%s\n", out, j)
+ },
+}
+var getPrivacyConfigCmd = &cobra.Command{
+ Use: "get",
+ Short: "read reward address & privacy setting from file",
+ Long: `read reward address & privacy setting from file`,
+ Run: func(cmd *cobra.Command, args []string) {
+ mLog := logging.NewMasterLogger()
+ mLog.SetLevel(logrus.InfoLevel)
+ if out == "" {
+ out = getpathstr
+ }
+ if out == "" {
+ logger.Fatal("config was not detected and no path was specified.")
+ }
+ p, err := os.ReadFile(filepath.Clean(out))
+ if err != nil {
+ logger.WithError(err).Fatal("Failed to read config file.")
+ }
+ fmt.Printf("%s\n", p)
+ },
+}
diff --git a/cmd/skywire-cli/commands/config/root.go b/cmd/skywire-cli/commands/config/root.go
index e7e341e273..8ed1a8ab8d 100644
--- a/cmd/skywire-cli/commands/config/root.go
+++ b/cmd/skywire-cli/commands/config/root.go
@@ -8,6 +8,7 @@ import (
"github.com/skycoin/skywire-utilities/pkg/cipher"
"github.com/skycoin/skywire-utilities/pkg/logging"
utilenv "github.com/skycoin/skywire-utilities/pkg/skyenv"
+ "github.com/skycoin/skywire/pkg/skyenv"
"github.com/skycoin/skywire/pkg/visor/visorconfig"
)
@@ -42,7 +43,7 @@ var (
isAll bool
isOutUnset bool
ver string
- isRoot bool
+ isRoot = skyenv.IsRoot()
svcconf = strings.ReplaceAll(utilenv.ServiceConfAddr, "http://", "") //skyenv.DefaultServiceConfAddr
testconf = strings.ReplaceAll(utilenv.TestServiceConfAddr, "http://", "") //skyenv.DefaultServiceConfAddr
ghiddenflags []string
diff --git a/cmd/skywire-cli/commands/rpc/root.go b/cmd/skywire-cli/commands/rpc/root.go
index 711d2a44e9..3a747f4095 100644
--- a/cmd/skywire-cli/commands/rpc/root.go
+++ b/cmd/skywire-cli/commands/rpc/root.go
@@ -6,11 +6,11 @@ import (
"net"
"time"
+ "github.com/spf13/pflag"
+
"github.com/skycoin/skywire-utilities/pkg/logging"
"github.com/skycoin/skywire/cmd/skywire-cli/internal"
"github.com/skycoin/skywire/pkg/visor"
-
- "github.com/spf13/pflag"
)
var (
diff --git a/cmd/skywire-cli/commands/visor/privacy.go b/cmd/skywire-cli/commands/visor/privacy.go
new file mode 100644
index 0000000000..f2d05b5c3f
--- /dev/null
+++ b/cmd/skywire-cli/commands/visor/privacy.go
@@ -0,0 +1,70 @@
+package clivisor
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/sirupsen/logrus"
+ "github.com/spf13/cobra"
+
+ "github.com/skycoin/skywire-utilities/pkg/logging"
+ clirpc "github.com/skycoin/skywire/cmd/skywire-cli/commands/rpc"
+ "github.com/skycoin/skywire/cmd/skywire-cli/internal"
+ "github.com/skycoin/skywire/pkg/skyenv"
+)
+
+var (
+ displayNodeIP bool
+ rewardAddress string
+ out string
+ pathstr string
+)
+
+func init() {
+
+ RootCmd.AddCommand(privacyCmd)
+ privacyCmd.AddCommand(setPrivacyCmd)
+ privacyCmd.AddCommand(getPrivacyCmd)
+ privacyCmd.Flags().SortFlags = false
+ setPrivacyCmd.Flags().BoolVarP(&displayNodeIP, "publicip", "i", false, "display node ip")
+ // default is the genesis address for the skycoin blockchain; for testing
+ setPrivacyCmd.Flags().StringVarP(&rewardAddress, "address", "a", "2jBbGxZRGoQG1mqhPBnXnLTxK6oxsTf8os6", "reward address")
+ //use the correct path for the available permissions
+ pathstr = strings.Join([]string{skyenv.Config().LocalPath, skyenv.PrivFile}, "/")
+ setPrivacyCmd.Flags().StringVarP(&out, "out", "o", "", "output config: "+pathstr)
+}
+
+var privacyCmd = &cobra.Command{
+ Use: "priv",
+ Short: "privacy settings",
+ Long: "configure privacy settings\n\ntest of the api endpoints GetPrivacy & SetPrivacy",
+ Hidden: true,
+}
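+
+// Illustrative usage: skywire-cli visor priv set -a <reward-address> -i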
+var setPrivacyCmd = &cobra.Command{
+ Use: "set",
+ Short: "set privacy.json via rpc",
+ Long: "configure privacy settings\n\ntest of the api endpoint SetPrivacy",
+ Run: func(cmd *cobra.Command, args []string) {
+ mLog := logging.NewMasterLogger()
+ mLog.SetLevel(logrus.InfoLevel)
+ log := logging.MustGetLogger("skywire-cli visor priv set")
+ client := clirpc.Client(cmd.Flags())
+ resp, err := client.SetPrivacy(skyenv.Privacy{DisplayNodeIP: displayNodeIP, RewardAddress: rewardAddress})
+ if err != nil {
+ internal.PrintFatalError(cmd.Flags(), fmt.Errorf("Failed to connect: %v", err))
+ }
+ log.Info("Privacy settings updated to:\n", resp)
+ },
+}
+var getPrivacyCmd = &cobra.Command{
+ Use: "get",
+ Short: "read privacy setting from file",
+ Long: "configure privacy settings\n\ntest of the api endpoints GetPrivacy",
+ Run: func(cmd *cobra.Command, args []string) {
+ p, err := clirpc.Client(cmd.Flags()).GetPrivacy()
+ if err != nil {
+ internal.PrintFatalError(cmd.Flags(), fmt.Errorf("Failed to connect: %v", err))
+ }
+ fmt.Printf("%s", p)
+ },
+}
diff --git a/cmd/skywire-visor/commands/root.go b/cmd/skywire-visor/commands/root.go
index 1998af912c..2aca24496d 100644
--- a/cmd/skywire-visor/commands/root.go
+++ b/cmd/skywire-visor/commands/root.go
@@ -4,6 +4,7 @@ import (
"bytes"
"context"
"embed"
+ "encoding/json"
"fmt"
"io"
"io/fs"
@@ -12,7 +13,6 @@ import (
_ "net/http/pprof" // nolint:gosec // https://golang.org/doc/diagnostics.html#profiling
"os"
"os/exec"
- "os/user"
"path/filepath"
"strings"
"sync"
@@ -55,6 +55,7 @@ var (
stdin bool
launchBrowser bool
hypervisorUI bool
+ noHypervisorUI bool
remoteHypervisorPKs string
disableHypervisorPKs bool
isAutoPeer bool
@@ -66,7 +67,7 @@ var (
all bool
pkg bool
usr bool
- localIPs []net.IP
+ localIPs []net.IP // nolint:unused
// root indicates process is run with root permissions
root bool // nolint:unused
// visorBuildInfo holds information about the build
@@ -74,15 +75,9 @@ var (
)
func init() {
- usrLvl, err := user.Current()
- if err != nil {
- panic(err)
- }
- if usrLvl.Username == "root" {
- root = true
- }
+ root = skyenv.IsRoot()
- localIPs, err = netutil.DefaultNetworkInterfaceIPs()
+ localIPs, err := netutil.DefaultNetworkInterfaceIPs()
if err != nil {
logger.WithError(err).Warn("Could not determine network interface IP address")
if len(localIPs) == 0 {
@@ -97,6 +92,8 @@ func init() {
rootCmd.Flags().BoolVarP(&launchBrowser, "browser", "b", false, "open hypervisor ui in default web browser")
}
rootCmd.Flags().BoolVarP(&hypervisorUI, "hvui", "i", false, "run as hypervisor")
+ rootCmd.Flags().BoolVarP(&noHypervisorUI, "nohvui", "x", false, "disable hypervisor")
+ hiddenflags = append(hiddenflags, "nohvui")
rootCmd.Flags().StringVarP(&remoteHypervisorPKs, "hv", "j", "", "add remote hypervisor PKs at runtime")
hiddenflags = append(hiddenflags, "hv")
rootCmd.Flags().BoolVarP(&disableHypervisorPKs, "xhv", "k", false, "disable remote hypervisors set in config file")
@@ -252,6 +249,18 @@ func runVisor(conf *visorconfig.V1) {
conf = initConfig(log, confPath)
}
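+ // generate the system hardware survey and write it to the visor's local path on startup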
+ survey := skyenv.SystemSurvey()
+ survey.PubKey = conf.PK
+ // Marshal the survey to json.
+ s, err := json.MarshalIndent(survey, "", "\t")
+ if err != nil {
+ log.WithError(err).Error("Could not marshal json.")
+ }
+ err = os.WriteFile(conf.LocalPath+"/"+skyenv.SurveyFile, s, 0644) //nolint
+ if err != nil {
+ log.WithError(err).Error("Failed to write system hardware survey to file.")
+ }
+
if skyenv.OS == "linux" {
//warn about creating files & directories as root in non root-owned dir
if _, err := exec.LookPath("stat"); err == nil {
@@ -268,7 +277,7 @@ func runVisor(conf *visorconfig.V1) {
log.Error("cannot stat: /root")
}
if (owner != rootOwner) && root {
- log.Warn("writing config as root to directory not owned by root")
+ log.Warn("writing as root to directory not owned by root")
}
if !root && (owner == rootOwner) {
log.Fatal("Insufficient permissions to write to the specified path")
@@ -466,6 +475,10 @@ func initConfig(mLog *logging.MasterLogger, confPath string) *visorconfig.V1 { /
if conf.Hypervisor != nil {
conf.Hypervisor.UIAssets = uiAssets
}
+ if noHypervisorUI {
+ conf.Hypervisor = nil
+ }
+
return conf
}
diff --git a/go.mod b/go.mod
index 376499157d..c43f39279a 100644
--- a/go.mod
+++ b/go.mod
@@ -45,6 +45,7 @@ require (
github.com/go-chi/chi/v5 v5.0.8-0.20220103230436-7dbe9a0bd10f
github.com/ivanpirog/coloredcobra v1.0.0
github.com/james-barrow/golang-ipc v0.0.0-20210227130457-95e7cc81f5e2
+ github.com/jaypipes/ghw v0.9.0
github.com/lib/pq v1.10.7
github.com/skycoin/dmsg v0.0.0-20220904231115-c313c992c788
github.com/skycoin/skywire-utilities v0.0.0-20220712142443-abafa30105ce
@@ -58,17 +59,20 @@ require (
github.com/ActiveState/termtest/conpty v0.5.0 // indirect
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 // indirect
github.com/Microsoft/go-winio v0.4.16 // indirect
- github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d // indirect
+ github.com/StackExchange/wmi v1.2.1 // indirect
github.com/creack/pty v1.1.15 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/fatih/color v1.13.0 // indirect
- github.com/go-ole/go-ole v1.2.4 // indirect
+ github.com/ghodss/yaml v1.0.0 // indirect
+ github.com/go-ole/go-ole v1.2.6 // indirect
github.com/godbus/dbus/v5 v5.1.0 // indirect
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 // indirect
github.com/inconshreveable/mousetrap v1.0.0 // indirect
+ github.com/jaypipes/pcidb v1.0.0 // indirect
github.com/klauspost/compress v1.11.0 // indirect
github.com/klauspost/cpuid v1.2.4 // indirect
github.com/mattn/go-isatty v0.0.14 // indirect
+ github.com/mitchellh/go-homedir v1.1.0 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/pkg/errors v0.9.1 // indirect
@@ -83,7 +87,9 @@ require (
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519 // indirect
golang.org/x/mod v0.5.0 // indirect
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect
+ gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect
+ howett.net/plist v1.0.0 // indirect
)
// Uncomment for tests with alternate branches of 'dmsg'
diff --git a/go.sum b/go.sum
index aa2f155df3..64c398e52c 100644
--- a/go.sum
+++ b/go.sum
@@ -67,8 +67,9 @@ github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3
github.com/Microsoft/go-winio v0.4.16 h1:FtSW/jqD+l4ba5iPBj9CODVtgfYAD8w2wS923g/cFDk=
github.com/Microsoft/go-winio v0.4.16/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0=
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
-github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d h1:G0m3OIz70MZUWq3EgK3CesDbo8upS2Vm9/P3FtgI+Jk=
github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg=
+github.com/StackExchange/wmi v1.2.1 h1:VIkavFPXSjcnS+O8yTq7NI32k0R5Aj+v39y29VYDOSA=
+github.com/StackExchange/wmi v1.2.1/go.mod h1:rcmrprowKIVzvc+NUiLncP2uuArMWLCbu9SBzvHz7e8=
github.com/VictoriaMetrics/metrics v1.18.1 h1:OZ0+kTTto8oPfHnVAnTOoyl0XlRhRkoQrD2n2cOuRw0=
github.com/VictoriaMetrics/metrics v1.18.1/go.mod h1:ArjwVz7WpgpegX/JpB0zpNF2h2232kErkEnzH1sxMmA=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
@@ -153,6 +154,7 @@ github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4
github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU=
github.com/gen2brain/dlgs v0.0.0-20210911090025-cbd38e821b98 h1:wkHRSagNSNKP54v6Pf/Tebhe8bQLLkg6FQaM4/y8v2g=
github.com/gen2brain/dlgs v0.0.0-20210911090025-cbd38e821b98/go.mod h1:/eFcjDXaU2THSOOqLxOPETIbHETnamk8FA/hMjhg/gU=
+github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0=
github.com/go-chi/chi/v5 v5.0.7/go.mod h1:DslCQbL2OYiznFReuXYUmQ2hGd1aDpCnlMNITLSKoi8=
@@ -166,8 +168,10 @@ github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
-github.com/go-ole/go-ole v1.2.4 h1:nNBDSCOigTSiarFpYE9J/KtEA1IOW4CNeqT9TQDqCxI=
github.com/go-ole/go-ole v1.2.4/go.mod h1:XCwSNxSkXRo4vlyPy93sltvi/qJq0jqQhjqQNIwKuxM=
+github.com/go-ole/go-ole v1.2.5/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
+github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY=
+github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
github.com/go-redis/redis v6.15.9+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA=
github.com/go-redis/redis/v8 v8.11.5/go.mod h1:gREzHqY1hg6oD9ngVRbLStwAWKhA0FEgq8Jd4h5lpwo=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
@@ -314,7 +318,12 @@ github.com/ivanpirog/coloredcobra v1.0.0 h1:MY8hiTd5pfXE6K2OPDAUZvx7M8N2rXmd0hyW
github.com/ivanpirog/coloredcobra v1.0.0/go.mod h1:iho4nEKcnwZFiniGSdcgdvRgZNjxm+h20acv8vqmN6Q=
github.com/james-barrow/golang-ipc v0.0.0-20210227130457-95e7cc81f5e2 h1:lnIIG509NeyPk/15ZHqP3DwTTQXqp2PoQoxGdYDC2h4=
github.com/james-barrow/golang-ipc v0.0.0-20210227130457-95e7cc81f5e2/go.mod h1:M3eGiVVY7bdtqyWT+gtbIqji7CqHi3PKJHSPl2pP40c=
+github.com/jaypipes/ghw v0.9.0 h1:TWF4wNIGtZcgDJaiNcFgby5BR8s2ixcUe0ydxNO2McY=
+github.com/jaypipes/ghw v0.9.0/go.mod h1:dXMo19735vXOjpIBDyDYSp31sB2u4hrtRCMxInqQ64k=
+github.com/jaypipes/pcidb v1.0.0 h1:vtZIfkiCUE42oYbJS0TAq9XSfSmcsgo9IdxSm9qzYU8=
+github.com/jaypipes/pcidb v1.0.0/go.mod h1:TnYUvqhPBzCKnH34KrIX22kAeEbDCSRJ9cqLRCuNDfk=
github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1/go.mod h1:E0B/fFc00Y+Rasa88328GlI/XbtyysCtTHZS8h7IrBU=
+github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
@@ -379,6 +388,8 @@ github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3N
github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso=
github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI=
github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI=
+github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
+github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
@@ -501,10 +512,12 @@ github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasO
github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4=
github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I=
github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
+github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
github.com/spf13/cobra v1.3.0/go.mod h1:BrRVncBjOJa/eUcVVm9CE+oC6as8k+VYr4NY7WCi9V4=
github.com/spf13/cobra v1.4.0 h1:y+wJpx64xcgO1V+RcnwW0LEHxTKRi2ZDPSBjWnrg88Q=
github.com/spf13/cobra v1.4.0/go.mod h1:Wo4iy3BUC+X2Fybo0PDqwJIv3dNRiZLHQymsfxlB84g=
github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo=
+github.com/spf13/pflag v1.0.2/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/viper v1.10.0/go.mod h1:SoyBPwAtKDzypXNDFKN5kzH7ppppbGZtls1UpIy5AsM=
@@ -788,6 +801,7 @@ golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211205182925-97ca703d548d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220319134239-a9b59b0215f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220627191245-f75cf1eec38b h1:2n253B2r0pYSmEV+UNCQoPfU/FiaizQEK5Gu4Bq4JE8=
golang.org/x/sys v0.0.0-20220627191245-f75cf1eec38b/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
@@ -1040,6 +1054,7 @@ gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMy
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
gopkg.in/ini.v1 v1.66.2/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
+gopkg.in/yaml.v1 v1.0.0-20140924161607-9f9df34309c0/go.mod h1:WDnlLJ4WF5VGsH/HVa3CI79GS0ol3YnhVnKP89i0kNg=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
@@ -1047,6 +1062,7 @@ gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo=
@@ -1060,6 +1076,8 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
+howett.net/plist v1.0.0 h1:7CrbWYbPPO/PyNy38b2EB/+gYbjCe2DXBxgtOOZbSQM=
+howett.net/plist v1.0.0/go.mod h1:lqaXoTrLY4hg8tnEzNru53gicrbv7rrk+2xJA/7hw9g=
nhooyr.io/websocket v1.8.2 h1:LwdzfyyOZKtVFoXay6A39Acu03KmidSZ3YUUvPa13PA=
nhooyr.io/websocket v1.8.2/go.mod h1:LiqdCg1Cu7TPWxEvPjPa0TGYxCsy4pHNTN9gGluwBpQ=
periph.io/x/periph v3.6.8+incompatible h1:lki0ie6wHtvlilXhIkabdCUQMpb5QN4Fx33yNQdqnaA=
diff --git a/pkg/skyenv/values.go b/pkg/skyenv/values.go
index b7bbb77126..976bb2ea8d 100644
--- a/pkg/skyenv/values.go
+++ b/pkg/skyenv/values.go
@@ -4,11 +4,14 @@ package skyenv
import (
"os"
"os/exec"
+ "os/user"
"path/filepath"
"strings"
"time"
"github.com/bitfield/script"
+ "github.com/google/uuid"
+ "github.com/jaypipes/ghw"
"github.com/skycoin/skywire-utilities/pkg/buildinfo"
"github.com/skycoin/skywire-utilities/pkg/cipher"
@@ -171,3 +174,47 @@ func HomePath() string {
dir, _ := os.UserHomeDir() //nolint
return dir
}
+
+// Config returns either UserConfig or PackageConfig based on permissions
+func Config() PkgConfig {
+ if IsRoot() {
+ return PackageConfig()
+ }
+ return UserConfig()
+}
+
+// IsRoot checks for root permissions
+func IsRoot() bool {
+ userLvl, err := user.Current()
+ if err != nil {
+ // assume non-root if the current user cannot be determined
+ return false
+ }
+ return userLvl.Username == "root"
+}
+
+// Privacy represents the json-encoded contents of the privacy.json file
+type Privacy struct {
+ DisplayNodeIP bool `json:"display_node_ip"`
+ RewardAddress string `json:"reward_address,omitempty"`
+}
+
+// Survey system hardware survey struct
+type Survey struct {
+ UUID uuid.UUID
+ PubKey cipher.PubKey
+ Disks *ghw.BlockInfo
+ Product *ghw.ProductInfo
+ Memory *ghw.MemoryInfo
+}
+
+// SurveyFile is the name of the survey file
+const SurveyFile string = "system.json"
+
+// PrivFile is the name of the file containing the skycoin reward address and privacy setting
+const PrivFile string = "privacy.json"
+
+// SystemSurvey returns system hardware survey
+func SystemSurvey() (s Survey) {
+ s.UUID = uuid.New()
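+ // errors from ghw are intentionally ignored; fields may be nil where the hardware info is unavailable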
+ s.Disks, _ = ghw.Block() //nolint
+ s.Product, _ = ghw.Product() //nolint
+ s.Memory, _ = ghw.Memory() //nolint
+ return s
+}
diff --git a/pkg/visor/api.go b/pkg/visor/api.go
index 68d14af875..82edba23f2 100644
--- a/pkg/visor/api.go
+++ b/pkg/visor/api.go
@@ -12,6 +12,7 @@ import (
"sync/atomic"
"time"
+ "github.com/bitfield/script"
"github.com/ccding/go-stun/stun"
"github.com/google/uuid"
"github.com/sirupsen/logrus"
@@ -37,6 +38,8 @@ type API interface {
Health() (*HealthInfo, error)
Uptime() (float64, error)
+ SetPrivacy(skyenv.Privacy) (string, error)
+ GetPrivacy() (string, error)
App(appName string) (*appserver.AppState, error)
Apps() ([]*appserver.AppState, error)
StartApp(appName string) error
@@ -306,6 +309,35 @@ func (v *Visor) Uptime() (float64, error) {
return time.Since(v.startedAt).Seconds(), nil
}
+// SetPrivacy implements API.
+func (v *Visor) SetPrivacy(p skyenv.Privacy) (string, error) {
+ /*
+ skywire-cli config priv set [flags]
+ Flags:
+ -a, --address string reward address (default "2jBbGxZRGoQG1mqhPBnXnLTxK6oxsTf8os6")
+ -o, --out string output config: /opt/skywire/local/privacy.json
+ -i, --publicip display node ip
+ */
+ clicmd := `skywire-cli config priv set `
+ //Set flags for node privacy and reward address based on input
+ if p.DisplayNodeIP {
+ clicmd = clicmd + ` -i `
+ }
+ if p.RewardAddress != "" {
+ clicmd = clicmd + ` -a ` + p.RewardAddress
+ }
+ //use the currently configured local_path this visor is using
+ clicmd = clicmd + ` -o ` + strings.Join([]string{v.conf.LocalPath, skyenv.PrivFile}, "/")
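+ // the resulting command resembles: skywire-cli config priv set [-i] [-a <address>] -o <local_path>/privacy.json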
+
+ return script.Exec(clicmd).String()
+}
+
+// GetPrivacy implements API.
+func (v *Visor) GetPrivacy() (string, error) {
+ clicmd := `skywire-cli config priv get -o ` + strings.Join([]string{v.conf.LocalPath, skyenv.PrivFile}, "/") + ` --json`
+ return script.Exec(clicmd).String()
+}
+
// Apps implements API.
func (v *Visor) Apps() ([]*appserver.AppState, error) {
return v.appL.AppStates(), nil
diff --git a/pkg/visor/hypervisor.go b/pkg/visor/hypervisor.go
index 3e373c55b2..262468da0d 100644
--- a/pkg/visor/hypervisor.go
+++ b/pkg/visor/hypervisor.go
@@ -266,6 +266,9 @@ func (hv *Hypervisor) makeMux() chi.Router {
r.Put("/visors/{pk}/persistent-transports", hv.putPersistentTransports())
r.Get("/visors/{pk}/log/rotation", hv.getLogRotationInterval())
r.Put("/visors/{pk}/log/rotation", hv.putLogRotationInterval())
+ r.Get("/visors/{pubkey}/privacy", hv.getPrivacy())
+ r.Put("/visors/{pubkey}/privacy", hv.putPrivacy())
+
})
})
@@ -1264,6 +1267,37 @@ func (hv *Hypervisor) getLogRotationInterval() http.HandlerFunc {
})
}
+func (hv *Hypervisor) putPrivacy() http.HandlerFunc {
+ return hv.withCtx(hv.visorCtx, func(w http.ResponseWriter, r *http.Request, ctx *httpCtx) {
+ var reqBody skyenv.Privacy
+
+ if err := httputil.ReadJSON(r, &reqBody); err != nil {
+ if err != io.EOF {
+ hv.log(r).Warnf("putPersistentTransports request: %v", err)
+ }
+ httputil.WriteJSON(w, r, http.StatusBadRequest, usermanager.ErrMalformedRequest)
+ return
+ }
+
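+ // apply the settings via the visor API, which shells out to skywire-cli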
+ if _, err := ctx.API.SetPrivacy(reqBody); err != nil {
+ httputil.WriteJSON(w, r, http.StatusInternalServerError, err)
+ return
+ }
+ httputil.WriteJSON(w, r, http.StatusOK, struct{}{})
+ })
+}
+
+func (hv *Hypervisor) getPrivacy() http.HandlerFunc {
+ return hv.withCtx(hv.visorCtx, func(w http.ResponseWriter, r *http.Request, ctx *httpCtx) {
+ pts, err := ctx.API.GetPrivacy()
+ if err != nil {
+ httputil.WriteJSON(w, r, http.StatusInternalServerError, err)
+ return
+ }
+ httputil.WriteJSON(w, r, http.StatusOK, pts)
+ })
+}
+
/*
<<< Helper functions >>>
*/
diff --git a/pkg/visor/rpc.go b/pkg/visor/rpc.go
index 27c9021674..d0fb3302ed 100644
--- a/pkg/visor/rpc.go
+++ b/pkg/visor/rpc.go
@@ -13,6 +13,7 @@ import (
"github.com/skycoin/skywire/pkg/app/appserver"
"github.com/skycoin/skywire/pkg/routing"
"github.com/skycoin/skywire/pkg/servicedisc"
+ "github.com/skycoin/skywire/pkg/skyenv"
"github.com/skycoin/skywire/pkg/transport"
"github.com/skycoin/skywire/pkg/transport/network"
"github.com/skycoin/skywire/pkg/util/rpcutil"
@@ -90,6 +91,26 @@ func (r *RPC) Uptime(_ *struct{}, out *float64) (err error) {
return err
}
+/*
+ <<< SKYCOIN REWARD ADDRESS AND PRIVACY SETTING >>>
+*/
+
+// SetPrivacy sets the reward address and privacy setting in privacy.json
+func (r *RPC) SetPrivacy(p skyenv.Privacy, _ *struct{}) (err error) {
+ defer rpcutil.LogCall(r.log, "SetPrivacy", p)(nil, &err)
+ _, err = r.visor.SetPrivacy(p)
+ return err
+}
+
+// GetPrivacy reads the reward address and privacy setting from privacy.json
+func (r *RPC) GetPrivacy(_ *struct{}, p *string) (err error) {
+ defer rpcutil.LogCall(r.log, "GetPrivacy", nil)(p, &err)
+ var q string
+ q, err = r.visor.GetPrivacy()
+ *p = q
+ return err
+}
+
/*
<<< APP LOGS >>>
*/
diff --git a/pkg/visor/rpc_client.go b/pkg/visor/rpc_client.go
index 781dd01060..30963d79de 100644
--- a/pkg/visor/rpc_client.go
+++ b/pkg/visor/rpc_client.go
@@ -4,6 +4,7 @@ import (
"context"
"encoding/binary"
"encoding/hex"
+ "encoding/json"
"errors"
"fmt"
"io"
@@ -121,6 +122,25 @@ func (rc *rpcClient) Uptime() (float64, error) {
return out, err
}
+// SetPrivacy implements API.
+func (rc *rpcClient) SetPrivacy(p skyenv.Privacy) (string, error) {
+ err := rc.Call("SetPrivacy", &p, &struct{}{})
+ if err != nil {
+ return "", err
+ }
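+ // on success, echo the applied settings back to the caller as json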
+ q, err := json.Marshal(p)
+ if err != nil {
+ return "", err
+ }
+ return string(q), err
+}
+
+// GetPrivacy implements API.
+func (rc *rpcClient) GetPrivacy() (p string, err error) {
+ err = rc.Call("GetPrivacy", &struct{}{}, &p)
+ return p, err
+}
+
// Apps calls Apps.
func (rc *rpcClient) Apps() ([]*appserver.AppState, error) {
states := make([]*appserver.AppState, 0)
@@ -622,6 +642,16 @@ func (mc *mockRPCClient) Uptime() (float64, error) {
return time.Since(mc.startedAt).Seconds(), nil
}
+// SetPrivacy implements API.
+func (mc *mockRPCClient) SetPrivacy(p skyenv.Privacy) (string, error) {
+ return "", nil
+}
+
+// GetPrivacy implements API.
+func (mc *mockRPCClient) GetPrivacy() (p string, err error) {
+ return p, nil
+}
+
// Apps implements API.
func (mc *mockRPCClient) Apps() ([]*appserver.AppState, error) {
var apps []*appserver.AppState
diff --git a/vendor/github.com/StackExchange/wmi/README.md b/vendor/github.com/StackExchange/wmi/README.md
index 426d1a46b4..c4a432d6db 100644
--- a/vendor/github.com/StackExchange/wmi/README.md
+++ b/vendor/github.com/StackExchange/wmi/README.md
@@ -4,3 +4,10 @@ wmi
Package wmi provides a WQL interface to Windows WMI.
Note: It interfaces with WMI on the local machine, therefore it only runs on Windows.
+
+---
+
+NOTE: This project is no longer being actively maintained. If you would like
+to become its new owner, please contact tlimoncelli at stack over flow dot com.
+
+---
diff --git a/vendor/github.com/StackExchange/wmi/wmi.go b/vendor/github.com/StackExchange/wmi/wmi.go
index eab18cbfee..b4bb4f0901 100644
--- a/vendor/github.com/StackExchange/wmi/wmi.go
+++ b/vendor/github.com/StackExchange/wmi/wmi.go
@@ -68,7 +68,8 @@ func QueryNamespace(query string, dst interface{}, namespace string) error {
//
// By default, the local machine and default namespace are used. These can be
// changed using connectServerArgs. See
-// http://msdn.microsoft.com/en-us/library/aa393720.aspx for details.
+// https://docs.microsoft.com/en-us/windows/desktop/WmiSdk/swbemlocator-connectserver
+// for details.
//
// Query is a wrapper around DefaultClient.Query.
func Query(query string, dst interface{}, connectServerArgs ...interface{}) error {
@@ -78,6 +79,14 @@ func Query(query string, dst interface{}, connectServerArgs ...interface{}) erro
return DefaultClient.SWbemServicesClient.Query(query, dst, connectServerArgs...)
}
+// CallMethod calls a method named methodName on an instance of the class named
+// className, with the given params.
+//
+// CallMethod is a wrapper around DefaultClient.CallMethod.
+func CallMethod(connectServerArgs []interface{}, className, methodName string, params []interface{}) (int32, error) {
+ return DefaultClient.CallMethod(connectServerArgs, className, methodName, params)
+}
+
// A Client is an WMI query client.
//
// Its zero value (DefaultClient) is a usable client.
@@ -109,9 +118,103 @@ type Client struct {
SWbemServicesClient *SWbemServices
}
-// DefaultClient is the default Client and is used by Query, QueryNamespace
+// DefaultClient is the default Client and is used by Query, QueryNamespace, and CallMethod.
var DefaultClient = &Client{}
+// coinitService coinitializes WMI service. If no error is returned, a cleanup function
+// is returned which must be executed (usually deferred) to clean up allocated resources.
+func (c *Client) coinitService(connectServerArgs ...interface{}) (*ole.IDispatch, func(), error) {
+ var unknown *ole.IUnknown
+ var wmi *ole.IDispatch
+ var serviceRaw *ole.VARIANT
+
+ // be sure teardown happens in the reverse
+ // order from that which they were created
+ deferFn := func() {
+ if serviceRaw != nil {
+ serviceRaw.Clear()
+ }
+ if wmi != nil {
+ wmi.Release()
+ }
+ if unknown != nil {
+ unknown.Release()
+ }
+ ole.CoUninitialize()
+ }
+
+ // if we error'ed here, clean up immediately
+ var err error
+ defer func() {
+ if err != nil {
+ deferFn()
+ }
+ }()
+
+ err = ole.CoInitializeEx(0, ole.COINIT_MULTITHREADED)
+ if err != nil {
+ oleCode := err.(*ole.OleError).Code()
+ if oleCode != ole.S_OK && oleCode != S_FALSE {
+ return nil, nil, err
+ }
+ }
+
+ unknown, err = oleutil.CreateObject("WbemScripting.SWbemLocator")
+ if err != nil {
+ return nil, nil, err
+ } else if unknown == nil {
+ return nil, nil, ErrNilCreateObject
+ }
+
+ wmi, err = unknown.QueryInterface(ole.IID_IDispatch)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // service is a SWbemServices
+ serviceRaw, err = oleutil.CallMethod(wmi, "ConnectServer", connectServerArgs...)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ return serviceRaw.ToIDispatch(), deferFn, nil
+}
+
+// CallMethod calls a WMI method named methodName on an instance
+// of the class named className. It passes in the arguments given
+// in params. Use connectServerArgs to customize the machine and
+// namespace; by default, the local machine and default namespace
+// are used. See
+// https://docs.microsoft.com/en-us/windows/desktop/WmiSdk/swbemlocator-connectserver
+// for details.
+func (c *Client) CallMethod(connectServerArgs []interface{}, className, methodName string, params []interface{}) (int32, error) {
+ service, cleanup, err := c.coinitService(connectServerArgs...)
+ if err != nil {
+ return 0, fmt.Errorf("coinit: %v", err)
+ }
+ defer cleanup()
+
+ // Get class
+ classRaw, err := oleutil.CallMethod(service, "Get", className)
+ if err != nil {
+ return 0, fmt.Errorf("CallMethod Get class %s: %v", className, err)
+ }
+ class := classRaw.ToIDispatch()
+ defer classRaw.Clear()
+
+ // Run method
+ resultRaw, err := oleutil.CallMethod(class, methodName, params...)
+ if err != nil {
+ return 0, fmt.Errorf("CallMethod %s.%s: %v", className, methodName, err)
+ }
+ resultInt, ok := resultRaw.Value().(int32)
+ if !ok {
+ return 0, fmt.Errorf("return value was not an int32: %v (%T)", resultRaw, resultRaw)
+ }
+
+ return resultInt, nil
+}
+
// Query runs the WQL query and appends the values to dst.
//
// dst must have type *[]S or *[]*S, for some struct type S. Fields selected in
@@ -121,7 +224,8 @@ var DefaultClient = &Client{}
//
// By default, the local machine and default namespace are used. These can be
// changed using connectServerArgs. See
-// http://msdn.microsoft.com/en-us/library/aa393720.aspx for details.
+// https://docs.microsoft.com/en-us/windows/desktop/WmiSdk/swbemlocator-connectserver
+// for details.
func (c *Client) Query(query string, dst interface{}, connectServerArgs ...interface{}) error {
dv := reflect.ValueOf(dst)
if dv.Kind() != reflect.Ptr || dv.IsNil() {
@@ -138,36 +242,11 @@ func (c *Client) Query(query string, dst interface{}, connectServerArgs ...inter
runtime.LockOSThread()
defer runtime.UnlockOSThread()
- err := ole.CoInitializeEx(0, ole.COINIT_MULTITHREADED)
- if err != nil {
- oleCode := err.(*ole.OleError).Code()
- if oleCode != ole.S_OK && oleCode != S_FALSE {
- return err
- }
- }
- defer ole.CoUninitialize()
-
- unknown, err := oleutil.CreateObject("WbemScripting.SWbemLocator")
- if err != nil {
- return err
- } else if unknown == nil {
- return ErrNilCreateObject
- }
- defer unknown.Release()
-
- wmi, err := unknown.QueryInterface(ole.IID_IDispatch)
+ service, cleanup, err := c.coinitService(connectServerArgs...)
if err != nil {
return err
}
- defer wmi.Release()
-
- // service is a SWbemServices
- serviceRaw, err := oleutil.CallMethod(wmi, "ConnectServer", connectServerArgs...)
- if err != nil {
- return err
- }
- service := serviceRaw.ToIDispatch()
- defer serviceRaw.Clear()
+ defer cleanup()
// result is a SWBemObjectSet
resultRaw, err := oleutil.CallMethod(service, "ExecQuery", query)
@@ -265,6 +344,9 @@ func (c *Client) loadEntity(dst interface{}, src *ole.IDispatch) (errFieldMismat
f = f.Elem()
}
n := v.Type().Field(i).Name
+ if n[0] < 'A' || n[0] > 'Z' {
+ continue
+ }
if !f.CanSet() {
return &ErrFieldMismatch{
StructType: of.Type(),
@@ -478,7 +560,10 @@ func oleInt64(item *ole.IDispatch, prop string) (int64, error) {
// CreateQuery returns a WQL query string that queries all columns of src. where
// is an optional string that is appended to the query, to be used with WHERE
// clauses. In such a case, the "WHERE" string should appear at the beginning.
-func CreateQuery(src interface{}, where string) string {
+// The wmi class is obtained by the name of the type. You can pass an optional
+// class through the variadic class parameter, which is useful for anonymous
+// structs.
+func CreateQuery(src interface{}, where string, class ...string) string {
var b bytes.Buffer
b.WriteString("SELECT ")
s := reflect.Indirect(reflect.ValueOf(src))
@@ -495,7 +580,11 @@ func CreateQuery(src interface{}, where string) string {
}
b.WriteString(strings.Join(fields, ", "))
b.WriteString(" FROM ")
- b.WriteString(t.Name())
+ if len(class) > 0 {
+ b.WriteString(class[0])
+ } else {
+ b.WriteString(t.Name())
+ }
b.WriteString(" " + where)
return b.String()
}
diff --git a/vendor/github.com/ghodss/yaml/.gitignore b/vendor/github.com/ghodss/yaml/.gitignore
new file mode 100644
index 0000000000..e256a31e00
--- /dev/null
+++ b/vendor/github.com/ghodss/yaml/.gitignore
@@ -0,0 +1,20 @@
+# OSX leaves these everywhere on SMB shares
+._*
+
+# Eclipse files
+.classpath
+.project
+.settings/**
+
+# Emacs save files
+*~
+
+# Vim-related files
+[._]*.s[a-w][a-z]
+[._]s[a-w][a-z]
+*.un~
+Session.vim
+.netrwhist
+
+# Go test binaries
+*.test
diff --git a/vendor/github.com/ghodss/yaml/.travis.yml b/vendor/github.com/ghodss/yaml/.travis.yml
new file mode 100644
index 0000000000..0e9d6edc01
--- /dev/null
+++ b/vendor/github.com/ghodss/yaml/.travis.yml
@@ -0,0 +1,7 @@
+language: go
+go:
+ - 1.3
+ - 1.4
+script:
+ - go test
+ - go build
diff --git a/vendor/github.com/ghodss/yaml/LICENSE b/vendor/github.com/ghodss/yaml/LICENSE
new file mode 100644
index 0000000000..7805d36de7
--- /dev/null
+++ b/vendor/github.com/ghodss/yaml/LICENSE
@@ -0,0 +1,50 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Sam Ghods
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+
+Copyright (c) 2012 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/ghodss/yaml/README.md b/vendor/github.com/ghodss/yaml/README.md
new file mode 100644
index 0000000000..0200f75b4d
--- /dev/null
+++ b/vendor/github.com/ghodss/yaml/README.md
@@ -0,0 +1,121 @@
+# YAML marshaling and unmarshaling support for Go
+
+[![Build Status](https://travis-ci.org/ghodss/yaml.svg)](https://travis-ci.org/ghodss/yaml)
+
+## Introduction
+
+A wrapper around [go-yaml](https://github.com/go-yaml/yaml) designed to enable a better way of handling YAML when marshaling to and from structs.
+
+In short, this library first converts YAML to JSON using go-yaml and then uses `json.Marshal` and `json.Unmarshal` to convert to or from the struct. This means that it effectively reuses the JSON struct tags as well as the custom JSON methods `MarshalJSON` and `UnmarshalJSON` unlike go-yaml. For a detailed overview of the rationale behind this method, [see this blog post](http://ghodss.com/2014/the-right-way-to-handle-yaml-in-golang/).
+
+## Compatibility
+
+This package uses [go-yaml](https://github.com/go-yaml/yaml) and therefore supports [everything go-yaml supports](https://github.com/go-yaml/yaml#compatibility).
+
+## Caveats
+
+**Caveat #1:** When using `yaml.Marshal` and `yaml.Unmarshal`, binary data should NOT be preceded with the `!!binary` YAML tag. If you do, go-yaml will convert the binary data from base64 to native binary data, which is not compatible with JSON. You can still use binary in your YAML files though - just store them without the `!!binary` tag and decode the base64 in your code (e.g. in the custom JSON methods `MarshalJSON` and `UnmarshalJSON`). This also has the benefit that your YAML and your JSON binary data will be decoded exactly the same way. As an example:
+
+```
+BAD:
+ exampleKey: !!binary gIGC
+
+GOOD:
+ exampleKey: gIGC
+... and decode the base64 data in your code.
+```
+
+**Caveat #2:** When using `YAMLToJSON` directly, maps with keys that are maps will result in an error since this is not supported by JSON. This error will occur in `Unmarshal` as well since you can't unmarshal map keys anyways since struct fields can't be keys.
+
+## Installation and usage
+
+To install, run:
+
+```
+$ go get github.com/ghodss/yaml
+```
+
+And import using:
+
+```
+import "github.com/ghodss/yaml"
+```
+
+Usage is very similar to the JSON library:
+
+```go
+package main
+
+import (
+ "fmt"
+
+ "github.com/ghodss/yaml"
+)
+
+type Person struct {
+ Name string `json:"name"` // Affects YAML field names too.
+ Age int `json:"age"`
+}
+
+func main() {
+ // Marshal a Person struct to YAML.
+ p := Person{"John", 30}
+ y, err := yaml.Marshal(p)
+ if err != nil {
+ fmt.Printf("err: %v\n", err)
+ return
+ }
+ fmt.Println(string(y))
+ /* Output:
+ age: 30
+ name: John
+ */
+
+ // Unmarshal the YAML back into a Person struct.
+ var p2 Person
+ err = yaml.Unmarshal(y, &p2)
+ if err != nil {
+ fmt.Printf("err: %v\n", err)
+ return
+ }
+ fmt.Println(p2)
+ /* Output:
+ {John 30}
+ */
+}
+```
+
+`yaml.YAMLToJSON` and `yaml.JSONToYAML` methods are also available:
+
+```go
+package main
+
+import (
+ "fmt"
+
+ "github.com/ghodss/yaml"
+)
+
+func main() {
+ j := []byte(`{"name": "John", "age": 30}`)
+ y, err := yaml.JSONToYAML(j)
+ if err != nil {
+ fmt.Printf("err: %v\n", err)
+ return
+ }
+ fmt.Println(string(y))
+ /* Output:
+ name: John
+ age: 30
+ */
+ j2, err := yaml.YAMLToJSON(y)
+ if err != nil {
+ fmt.Printf("err: %v\n", err)
+ return
+ }
+ fmt.Println(string(j2))
+ /* Output:
+ {"age":30,"name":"John"}
+ */
+}
+```
diff --git a/vendor/github.com/ghodss/yaml/fields.go b/vendor/github.com/ghodss/yaml/fields.go
new file mode 100644
index 0000000000..5860074026
--- /dev/null
+++ b/vendor/github.com/ghodss/yaml/fields.go
@@ -0,0 +1,501 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+package yaml
+
+import (
+ "bytes"
+ "encoding"
+ "encoding/json"
+ "reflect"
+ "sort"
+ "strings"
+ "sync"
+ "unicode"
+ "unicode/utf8"
+)
+
+// indirect walks down v allocating pointers as needed,
+// until it gets to a non-pointer.
+// if it encounters an Unmarshaler, indirect stops and returns that.
+// if decodingNull is true, indirect stops at the last pointer so it can be set to nil.
+func indirect(v reflect.Value, decodingNull bool) (json.Unmarshaler, encoding.TextUnmarshaler, reflect.Value) {
+ // If v is a named type and is addressable,
+ // start with its address, so that if the type has pointer methods,
+ // we find them.
+ if v.Kind() != reflect.Ptr && v.Type().Name() != "" && v.CanAddr() {
+ v = v.Addr()
+ }
+ for {
+ // Load value from interface, but only if the result will be
+ // usefully addressable.
+ if v.Kind() == reflect.Interface && !v.IsNil() {
+ e := v.Elem()
+ if e.Kind() == reflect.Ptr && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Ptr) {
+ v = e
+ continue
+ }
+ }
+
+ if v.Kind() != reflect.Ptr {
+ break
+ }
+
+ if v.Elem().Kind() != reflect.Ptr && decodingNull && v.CanSet() {
+ break
+ }
+ if v.IsNil() {
+ if v.CanSet() {
+ v.Set(reflect.New(v.Type().Elem()))
+ } else {
+ v = reflect.New(v.Type().Elem())
+ }
+ }
+ if v.Type().NumMethod() > 0 {
+ if u, ok := v.Interface().(json.Unmarshaler); ok {
+ return u, nil, reflect.Value{}
+ }
+ if u, ok := v.Interface().(encoding.TextUnmarshaler); ok {
+ return nil, u, reflect.Value{}
+ }
+ }
+ v = v.Elem()
+ }
+ return nil, nil, v
+}
+
+// A field represents a single field found in a struct.
+type field struct {
+ name string
+ nameBytes []byte // []byte(name)
+ equalFold func(s, t []byte) bool // bytes.EqualFold or equivalent
+
+ tag bool
+ index []int
+ typ reflect.Type
+ omitEmpty bool
+ quoted bool
+}
+
+func fillField(f field) field {
+ f.nameBytes = []byte(f.name)
+ f.equalFold = foldFunc(f.nameBytes)
+ return f
+}
+
+// byName sorts field by name, breaking ties with depth,
+// then breaking ties with "name came from json tag", then
+// breaking ties with index sequence.
+type byName []field
+
+func (x byName) Len() int { return len(x) }
+
+func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+
+func (x byName) Less(i, j int) bool {
+ if x[i].name != x[j].name {
+ return x[i].name < x[j].name
+ }
+ if len(x[i].index) != len(x[j].index) {
+ return len(x[i].index) < len(x[j].index)
+ }
+ if x[i].tag != x[j].tag {
+ return x[i].tag
+ }
+ return byIndex(x).Less(i, j)
+}
+
+// byIndex sorts field by index sequence.
+type byIndex []field
+
+func (x byIndex) Len() int { return len(x) }
+
+func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+
+func (x byIndex) Less(i, j int) bool {
+ for k, xik := range x[i].index {
+ if k >= len(x[j].index) {
+ return false
+ }
+ if xik != x[j].index[k] {
+ return xik < x[j].index[k]
+ }
+ }
+ return len(x[i].index) < len(x[j].index)
+}
+
+// typeFields returns a list of fields that JSON should recognize for the given type.
+// The algorithm is breadth-first search over the set of structs to include - the top struct
+// and then any reachable anonymous structs.
+func typeFields(t reflect.Type) []field {
+ // Anonymous fields to explore at the current level and the next.
+ current := []field{}
+ next := []field{{typ: t}}
+
+ // Count of queued names for current level and the next.
+ count := map[reflect.Type]int{}
+ nextCount := map[reflect.Type]int{}
+
+ // Types already visited at an earlier level.
+ visited := map[reflect.Type]bool{}
+
+ // Fields found.
+ var fields []field
+
+ for len(next) > 0 {
+ current, next = next, current[:0]
+ count, nextCount = nextCount, map[reflect.Type]int{}
+
+ for _, f := range current {
+ if visited[f.typ] {
+ continue
+ }
+ visited[f.typ] = true
+
+ // Scan f.typ for fields to include.
+ for i := 0; i < f.typ.NumField(); i++ {
+ sf := f.typ.Field(i)
+ if sf.PkgPath != "" { // unexported
+ continue
+ }
+ tag := sf.Tag.Get("json")
+ if tag == "-" {
+ continue
+ }
+ name, opts := parseTag(tag)
+ if !isValidTag(name) {
+ name = ""
+ }
+ index := make([]int, len(f.index)+1)
+ copy(index, f.index)
+ index[len(f.index)] = i
+
+ ft := sf.Type
+ if ft.Name() == "" && ft.Kind() == reflect.Ptr {
+ // Follow pointer.
+ ft = ft.Elem()
+ }
+
+ // Record found field and index sequence.
+ if name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct {
+ tagged := name != ""
+ if name == "" {
+ name = sf.Name
+ }
+ fields = append(fields, fillField(field{
+ name: name,
+ tag: tagged,
+ index: index,
+ typ: ft,
+ omitEmpty: opts.Contains("omitempty"),
+ quoted: opts.Contains("string"),
+ }))
+ if count[f.typ] > 1 {
+ // If there were multiple instances, add a second,
+ // so that the annihilation code will see a duplicate.
+ // It only cares about the distinction between 1 or 2,
+ // so don't bother generating any more copies.
+ fields = append(fields, fields[len(fields)-1])
+ }
+ continue
+ }
+
+ // Record new anonymous struct to explore in next round.
+ nextCount[ft]++
+ if nextCount[ft] == 1 {
+ next = append(next, fillField(field{name: ft.Name(), index: index, typ: ft}))
+ }
+ }
+ }
+ }
+
+ sort.Sort(byName(fields))
+
+ // Delete all fields that are hidden by the Go rules for embedded fields,
+ // except that fields with JSON tags are promoted.
+
+ // The fields are sorted in primary order of name, secondary order
+ // of field index length. Loop over names; for each name, delete
+ // hidden fields by choosing the one dominant field that survives.
+ out := fields[:0]
+ for advance, i := 0, 0; i < len(fields); i += advance {
+ // One iteration per name.
+ // Find the sequence of fields with the name of this first field.
+ fi := fields[i]
+ name := fi.name
+ for advance = 1; i+advance < len(fields); advance++ {
+ fj := fields[i+advance]
+ if fj.name != name {
+ break
+ }
+ }
+ if advance == 1 { // Only one field with this name
+ out = append(out, fi)
+ continue
+ }
+ dominant, ok := dominantField(fields[i : i+advance])
+ if ok {
+ out = append(out, dominant)
+ }
+ }
+
+ fields = out
+ sort.Sort(byIndex(fields))
+
+ return fields
+}
+
+// dominantField looks through the fields, all of which are known to
+// have the same name, to find the single field that dominates the
+// others using Go's embedding rules, modified by the presence of
+// JSON tags. If there are multiple top-level fields, the boolean
+// will be false: This condition is an error in Go and we skip all
+// the fields.
+func dominantField(fields []field) (field, bool) {
+ // The fields are sorted in increasing index-length order. The winner
+ // must therefore be one with the shortest index length. Drop all
+ // longer entries, which is easy: just truncate the slice.
+ length := len(fields[0].index)
+ tagged := -1 // Index of first tagged field.
+ for i, f := range fields {
+ if len(f.index) > length {
+ fields = fields[:i]
+ break
+ }
+ if f.tag {
+ if tagged >= 0 {
+ // Multiple tagged fields at the same level: conflict.
+ // Return no field.
+ return field{}, false
+ }
+ tagged = i
+ }
+ }
+ if tagged >= 0 {
+ return fields[tagged], true
+ }
+ // All remaining fields have the same length. If there's more than one,
+ // we have a conflict (two fields named "X" at the same level) and we
+ // return no field.
+ if len(fields) > 1 {
+ return field{}, false
+ }
+ return fields[0], true
+}
+
+var fieldCache struct {
+ sync.RWMutex
+ m map[reflect.Type][]field
+}
+
+// cachedTypeFields is like typeFields but uses a cache to avoid repeated work.
+func cachedTypeFields(t reflect.Type) []field {
+ fieldCache.RLock()
+ f := fieldCache.m[t]
+ fieldCache.RUnlock()
+ if f != nil {
+ return f
+ }
+
+ // Compute fields without lock.
+ // Might duplicate effort but won't hold other computations back.
+ f = typeFields(t)
+ if f == nil {
+ f = []field{}
+ }
+
+ fieldCache.Lock()
+ if fieldCache.m == nil {
+ fieldCache.m = map[reflect.Type][]field{}
+ }
+ fieldCache.m[t] = f
+ fieldCache.Unlock()
+ return f
+}
+
+func isValidTag(s string) bool {
+ if s == "" {
+ return false
+ }
+ for _, c := range s {
+ switch {
+ case strings.ContainsRune("!#$%&()*+-./:<=>?@[]^_{|}~ ", c):
+ // Backslash and quote chars are reserved, but
+ // otherwise any punctuation chars are allowed
+ // in a tag name.
+ default:
+ if !unicode.IsLetter(c) && !unicode.IsDigit(c) {
+ return false
+ }
+ }
+ }
+ return true
+}
+
+const (
+ caseMask = ^byte(0x20) // Mask to ignore case in ASCII.
+ kelvin = '\u212a'
+ smallLongEss = '\u017f'
+)
+
+// foldFunc returns one of four different case folding equivalence
+// functions, from most general (and slow) to fastest:
+//
+// 1) bytes.EqualFold, if the key s contains any non-ASCII UTF-8
+// 2) equalFoldRight, if s contains special folding ASCII ('k', 'K', 's', 'S')
+// 3) asciiEqualFold, no special, but includes non-letters (including _)
+// 4) simpleLetterEqualFold, no specials, no non-letters.
+//
+// The letters S and K are special because they map to 3 runes, not just 2:
+// * S maps to s and to U+017F 'ſ' Latin small letter long s
+// * k maps to K and to U+212A 'K' Kelvin sign
+// See http://play.golang.org/p/tTxjOc0OGo
+//
+// The returned function is specialized for matching against s and
+// should only be given s. It's not curried for performance reasons.
+func foldFunc(s []byte) func(s, t []byte) bool {
+ nonLetter := false
+ special := false // special letter
+ for _, b := range s {
+ if b >= utf8.RuneSelf {
+ return bytes.EqualFold
+ }
+ upper := b & caseMask
+ if upper < 'A' || upper > 'Z' {
+ nonLetter = true
+ } else if upper == 'K' || upper == 'S' {
+ // See above for why these letters are special.
+ special = true
+ }
+ }
+ if special {
+ return equalFoldRight
+ }
+ if nonLetter {
+ return asciiEqualFold
+ }
+ return simpleLetterEqualFold
+}
+
+// equalFoldRight is a specialization of bytes.EqualFold when s is
+// known to be all ASCII (including punctuation), but contains an 's',
+// 'S', 'k', or 'K', requiring a Unicode fold on the bytes in t.
+// See comments on foldFunc.
+func equalFoldRight(s, t []byte) bool {
+ for _, sb := range s {
+ if len(t) == 0 {
+ return false
+ }
+ tb := t[0]
+ if tb < utf8.RuneSelf {
+ if sb != tb {
+ sbUpper := sb & caseMask
+ if 'A' <= sbUpper && sbUpper <= 'Z' {
+ if sbUpper != tb&caseMask {
+ return false
+ }
+ } else {
+ return false
+ }
+ }
+ t = t[1:]
+ continue
+ }
+ // sb is ASCII and t is not. t must be either kelvin
+ // sign or long s; sb must be s, S, k, or K.
+ tr, size := utf8.DecodeRune(t)
+ switch sb {
+ case 's', 'S':
+ if tr != smallLongEss {
+ return false
+ }
+ case 'k', 'K':
+ if tr != kelvin {
+ return false
+ }
+ default:
+ return false
+ }
+ t = t[size:]
+
+ }
+ if len(t) > 0 {
+ return false
+ }
+ return true
+}
+
+// asciiEqualFold is a specialization of bytes.EqualFold for use when
+// s is all ASCII (but may contain non-letters) and contains no
+// special-folding letters.
+// See comments on foldFunc.
+func asciiEqualFold(s, t []byte) bool {
+ if len(s) != len(t) {
+ return false
+ }
+ for i, sb := range s {
+ tb := t[i]
+ if sb == tb {
+ continue
+ }
+ if ('a' <= sb && sb <= 'z') || ('A' <= sb && sb <= 'Z') {
+ if sb&caseMask != tb&caseMask {
+ return false
+ }
+ } else {
+ return false
+ }
+ }
+ return true
+}
+
+// simpleLetterEqualFold is a specialization of bytes.EqualFold for
+// use when s is all ASCII letters (no underscores, etc) and also
+// doesn't contain 'k', 'K', 's', or 'S'.
+// See comments on foldFunc.
+func simpleLetterEqualFold(s, t []byte) bool {
+ if len(s) != len(t) {
+ return false
+ }
+ for i, b := range s {
+ if b&caseMask != t[i]&caseMask {
+ return false
+ }
+ }
+ return true
+}
+
+// tagOptions is the string following a comma in a struct field's "json"
+// tag, or the empty string. It does not include the leading comma.
+type tagOptions string
+
+// parseTag splits a struct field's json tag into its name and
+// comma-separated options.
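+//
+// For example, parseTag("name,omitempty") returns ("name", "omitempty").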
+func parseTag(tag string) (string, tagOptions) {
+ if idx := strings.Index(tag, ","); idx != -1 {
+ return tag[:idx], tagOptions(tag[idx+1:])
+ }
+ return tag, tagOptions("")
+}
+
+// Contains reports whether a comma-separated list of options
+// contains a particular optionName flag. optionName must be
+// surrounded by a string boundary or commas.
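+//
+// For example, tagOptions("omitempty,string").Contains("string") returns true.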
+func (o tagOptions) Contains(optionName string) bool {
+ if len(o) == 0 {
+ return false
+ }
+ s := string(o)
+ for s != "" {
+ var next string
+ i := strings.Index(s, ",")
+ if i >= 0 {
+ s, next = s[:i], s[i+1:]
+ }
+ if s == optionName {
+ return true
+ }
+ s = next
+ }
+ return false
+}
diff --git a/vendor/github.com/ghodss/yaml/yaml.go b/vendor/github.com/ghodss/yaml/yaml.go
new file mode 100644
index 0000000000..4fb4054a8b
--- /dev/null
+++ b/vendor/github.com/ghodss/yaml/yaml.go
@@ -0,0 +1,277 @@
+package yaml
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "reflect"
+ "strconv"
+
+ "gopkg.in/yaml.v2"
+)
+
+// Marshal marshals the object into JSON, then converts the JSON to YAML and
+// returns the YAML.
+func Marshal(o interface{}) ([]byte, error) {
+ j, err := json.Marshal(o)
+ if err != nil {
+ return nil, fmt.Errorf("error marshaling into JSON: %v", err)
+ }
+
+ y, err := JSONToYAML(j)
+ if err != nil {
+ return nil, fmt.Errorf("error converting JSON to YAML: %v", err)
+ }
+
+ return y, nil
+}
+
+// Unmarshal converts YAML to JSON, then uses JSON to unmarshal into an object.
+func Unmarshal(y []byte, o interface{}) error {
+ vo := reflect.ValueOf(o)
+ j, err := yamlToJSON(y, &vo)
+ if err != nil {
+ return fmt.Errorf("error converting YAML to JSON: %v", err)
+ }
+
+ err = json.Unmarshal(j, o)
+ if err != nil {
+ return fmt.Errorf("error unmarshaling JSON: %v", err)
+ }
+
+ return nil
+}
+
+// JSONToYAML converts JSON to YAML.
+func JSONToYAML(j []byte) ([]byte, error) {
+ // Convert the JSON to an object.
+ var jsonObj interface{}
+ // We are using yaml.Unmarshal here (instead of json.Unmarshal) because the
+ // Go JSON library doesn't try to pick the right number type (int, float,
+ // etc.) when unmarshalling to interface{}, it just picks float64
+ // universally. go-yaml does go through the effort of picking the right
+ // number type, so we can preserve number type throughout this process.
+ err := yaml.Unmarshal(j, &jsonObj)
+ if err != nil {
+ return nil, err
+ }
+
+ // Marshal this object into YAML.
+ return yaml.Marshal(jsonObj)
+}
+
+// YAMLToJSON converts YAML to JSON. Since JSON is a subset of YAML, passing
+// JSON through this method should be a no-op.
+//
+// Things YAML can do that are not supported by JSON:
+// * In YAML you can have binary and null keys in your maps. These are invalid
+// in JSON. (int and float keys are converted to strings.)
+// * Binary data in YAML with the !!binary tag is not supported. If you want to
+// use binary data with this library, encode the data as base64 as usual but do
+// not use the !!binary tag in your YAML. This will ensure the original base64
+// encoded data makes it all the way through to the JSON.
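+//
+// For example, YAMLToJSON([]byte("a: 1")) returns []byte(`{"a":1}`) with a
+// nil error.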
+func YAMLToJSON(y []byte) ([]byte, error) {
+ return yamlToJSON(y, nil)
+}
+
+func yamlToJSON(y []byte, jsonTarget *reflect.Value) ([]byte, error) {
+ // Convert the YAML to an object.
+ var yamlObj interface{}
+ err := yaml.Unmarshal(y, &yamlObj)
+ if err != nil {
+ return nil, err
+ }
+
+ // YAML objects are not completely compatible with JSON objects (e.g. you
+ // can have non-string keys in YAML). So, convert the YAML-compatible object
+ // to a JSON-compatible object, failing with an error if irrecoverable
+	// incompatibilities happen along the way.
+ jsonObj, err := convertToJSONableObject(yamlObj, jsonTarget)
+ if err != nil {
+ return nil, err
+ }
+
+ // Convert this object to JSON and return the data.
+ return json.Marshal(jsonObj)
+}
+
+func convertToJSONableObject(yamlObj interface{}, jsonTarget *reflect.Value) (interface{}, error) {
+ var err error
+
+ // Resolve jsonTarget to a concrete value (i.e. not a pointer or an
+ // interface). We pass decodingNull as false because we're not actually
+ // decoding into the value, we're just checking if the ultimate target is a
+ // string.
+ if jsonTarget != nil {
+ ju, tu, pv := indirect(*jsonTarget, false)
+		// We have a JSON or Text Unmarshaler at this level, so we can't be trying
+ // to decode into a string.
+ if ju != nil || tu != nil {
+ jsonTarget = nil
+ } else {
+ jsonTarget = &pv
+ }
+ }
+
+ // If yamlObj is a number or a boolean, check if jsonTarget is a string -
+ // if so, coerce. Else return normal.
+ // If yamlObj is a map or array, find the field that each key is
+ // unmarshaling to, and when you recurse pass the reflect.Value for that
+ // field back into this function.
+ switch typedYAMLObj := yamlObj.(type) {
+ case map[interface{}]interface{}:
+ // JSON does not support arbitrary keys in a map, so we must convert
+ // these keys to strings.
+ //
+ // From my reading of go-yaml v2 (specifically the resolve function),
+ // keys can only have the types string, int, int64, float64, binary
+ // (unsupported), or null (unsupported).
+ strMap := make(map[string]interface{})
+ for k, v := range typedYAMLObj {
+ // Resolve the key to a string first.
+ var keyString string
+ switch typedKey := k.(type) {
+ case string:
+ keyString = typedKey
+ case int:
+ keyString = strconv.Itoa(typedKey)
+ case int64:
+ // go-yaml will only return an int64 as a key if the system
+ // architecture is 32-bit and the key's value is between 32-bit
+ // and 64-bit. Otherwise the key type will simply be int.
+ keyString = strconv.FormatInt(typedKey, 10)
+ case float64:
+ // Stolen from go-yaml to use the same conversion to string as
+ // the go-yaml library uses to convert float to string when
+ // Marshaling.
+ s := strconv.FormatFloat(typedKey, 'g', -1, 32)
+ switch s {
+ case "+Inf":
+ s = ".inf"
+ case "-Inf":
+ s = "-.inf"
+ case "NaN":
+ s = ".nan"
+ }
+ keyString = s
+ case bool:
+ if typedKey {
+ keyString = "true"
+ } else {
+ keyString = "false"
+ }
+ default:
+ return nil, fmt.Errorf("Unsupported map key of type: %s, key: %+#v, value: %+#v",
+ reflect.TypeOf(k), k, v)
+ }
+
+ // jsonTarget should be a struct or a map. If it's a struct, find
+ // the field it's going to map to and pass its reflect.Value. If
+ // it's a map, find the element type of the map and pass the
+ // reflect.Value created from that type. If it's neither, just pass
+ // nil - JSON conversion will error for us if it's a real issue.
+ if jsonTarget != nil {
+ t := *jsonTarget
+ if t.Kind() == reflect.Struct {
+ keyBytes := []byte(keyString)
+ // Find the field that the JSON library would use.
+ var f *field
+ fields := cachedTypeFields(t.Type())
+ for i := range fields {
+ ff := &fields[i]
+ if bytes.Equal(ff.nameBytes, keyBytes) {
+ f = ff
+ break
+ }
+ // Do case-insensitive comparison.
+ if f == nil && ff.equalFold(ff.nameBytes, keyBytes) {
+ f = ff
+ }
+ }
+ if f != nil {
+ // Find the reflect.Value of the most preferential
+ // struct field.
+ jtf := t.Field(f.index[0])
+ strMap[keyString], err = convertToJSONableObject(v, &jtf)
+ if err != nil {
+ return nil, err
+ }
+ continue
+ }
+ } else if t.Kind() == reflect.Map {
+ // Create a zero value of the map's element type to use as
+ // the JSON target.
+ jtv := reflect.Zero(t.Type().Elem())
+ strMap[keyString], err = convertToJSONableObject(v, &jtv)
+ if err != nil {
+ return nil, err
+ }
+ continue
+ }
+ }
+ strMap[keyString], err = convertToJSONableObject(v, nil)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return strMap, nil
+ case []interface{}:
+ // We need to recurse into arrays in case there are any
+ // map[interface{}]interface{}'s inside and to convert any
+ // numbers to strings.
+
+ // If jsonTarget is a slice (which it really should be), find the
+ // thing it's going to map to. If it's not a slice, just pass nil
+ // - JSON conversion will error for us if it's a real issue.
+ var jsonSliceElemValue *reflect.Value
+ if jsonTarget != nil {
+ t := *jsonTarget
+ if t.Kind() == reflect.Slice {
+ // By default slices point to nil, but we need a reflect.Value
+ // pointing to a value of the slice type, so we create one here.
+ ev := reflect.Indirect(reflect.New(t.Type().Elem()))
+ jsonSliceElemValue = &ev
+ }
+ }
+
+ // Make and use a new array.
+ arr := make([]interface{}, len(typedYAMLObj))
+ for i, v := range typedYAMLObj {
+ arr[i], err = convertToJSONableObject(v, jsonSliceElemValue)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return arr, nil
+ default:
+ // If the target type is a string and the YAML type is a number,
+ // convert the YAML type to a string.
+ if jsonTarget != nil && (*jsonTarget).Kind() == reflect.String {
+ // Based on my reading of go-yaml, it may return int, int64,
+ // float64, or uint64.
+ var s string
+ switch typedVal := typedYAMLObj.(type) {
+ case int:
+ s = strconv.FormatInt(int64(typedVal), 10)
+ case int64:
+ s = strconv.FormatInt(typedVal, 10)
+ case float64:
+ s = strconv.FormatFloat(typedVal, 'g', -1, 32)
+ case uint64:
+ s = strconv.FormatUint(typedVal, 10)
+ case bool:
+ if typedVal {
+ s = "true"
+ } else {
+ s = "false"
+ }
+ }
+ if len(s) > 0 {
+ yamlObj = interface{}(s)
+ }
+ }
+ return yamlObj, nil
+ }
+
+ return nil, nil
+}
diff --git a/vendor/github.com/go-ole/go-ole/com.go b/vendor/github.com/go-ole/go-ole/com.go
index 6f986b1894..a9bef150a3 100644
--- a/vendor/github.com/go-ole/go-ole/com.go
+++ b/vendor/github.com/go-ole/go-ole/com.go
@@ -9,32 +9,32 @@ import (
)
var (
- procCoInitialize, _ = modole32.FindProc("CoInitialize")
- procCoInitializeEx, _ = modole32.FindProc("CoInitializeEx")
- procCoUninitialize, _ = modole32.FindProc("CoUninitialize")
- procCoCreateInstance, _ = modole32.FindProc("CoCreateInstance")
- procCoTaskMemFree, _ = modole32.FindProc("CoTaskMemFree")
- procCLSIDFromProgID, _ = modole32.FindProc("CLSIDFromProgID")
- procCLSIDFromString, _ = modole32.FindProc("CLSIDFromString")
- procStringFromCLSID, _ = modole32.FindProc("StringFromCLSID")
- procStringFromIID, _ = modole32.FindProc("StringFromIID")
- procIIDFromString, _ = modole32.FindProc("IIDFromString")
- procCoGetObject, _ = modole32.FindProc("CoGetObject")
- procGetUserDefaultLCID, _ = modkernel32.FindProc("GetUserDefaultLCID")
- procCopyMemory, _ = modkernel32.FindProc("RtlMoveMemory")
- procVariantInit, _ = modoleaut32.FindProc("VariantInit")
- procVariantClear, _ = modoleaut32.FindProc("VariantClear")
- procVariantTimeToSystemTime, _ = modoleaut32.FindProc("VariantTimeToSystemTime")
- procSysAllocString, _ = modoleaut32.FindProc("SysAllocString")
- procSysAllocStringLen, _ = modoleaut32.FindProc("SysAllocStringLen")
- procSysFreeString, _ = modoleaut32.FindProc("SysFreeString")
- procSysStringLen, _ = modoleaut32.FindProc("SysStringLen")
- procCreateDispTypeInfo, _ = modoleaut32.FindProc("CreateDispTypeInfo")
- procCreateStdDispatch, _ = modoleaut32.FindProc("CreateStdDispatch")
- procGetActiveObject, _ = modoleaut32.FindProc("GetActiveObject")
-
- procGetMessageW, _ = moduser32.FindProc("GetMessageW")
- procDispatchMessageW, _ = moduser32.FindProc("DispatchMessageW")
+ procCoInitialize = modole32.NewProc("CoInitialize")
+ procCoInitializeEx = modole32.NewProc("CoInitializeEx")
+ procCoUninitialize = modole32.NewProc("CoUninitialize")
+ procCoCreateInstance = modole32.NewProc("CoCreateInstance")
+ procCoTaskMemFree = modole32.NewProc("CoTaskMemFree")
+ procCLSIDFromProgID = modole32.NewProc("CLSIDFromProgID")
+ procCLSIDFromString = modole32.NewProc("CLSIDFromString")
+ procStringFromCLSID = modole32.NewProc("StringFromCLSID")
+ procStringFromIID = modole32.NewProc("StringFromIID")
+ procIIDFromString = modole32.NewProc("IIDFromString")
+ procCoGetObject = modole32.NewProc("CoGetObject")
+ procGetUserDefaultLCID = modkernel32.NewProc("GetUserDefaultLCID")
+ procCopyMemory = modkernel32.NewProc("RtlMoveMemory")
+ procVariantInit = modoleaut32.NewProc("VariantInit")
+ procVariantClear = modoleaut32.NewProc("VariantClear")
+ procVariantTimeToSystemTime = modoleaut32.NewProc("VariantTimeToSystemTime")
+ procSysAllocString = modoleaut32.NewProc("SysAllocString")
+ procSysAllocStringLen = modoleaut32.NewProc("SysAllocStringLen")
+ procSysFreeString = modoleaut32.NewProc("SysFreeString")
+ procSysStringLen = modoleaut32.NewProc("SysStringLen")
+ procCreateDispTypeInfo = modoleaut32.NewProc("CreateDispTypeInfo")
+ procCreateStdDispatch = modoleaut32.NewProc("CreateStdDispatch")
+ procGetActiveObject = modoleaut32.NewProc("GetActiveObject")
+
+ procGetMessageW = moduser32.NewProc("GetMessageW")
+ procDispatchMessageW = moduser32.NewProc("DispatchMessageW")
)
// coInitialize initializes COM library on current thread.
diff --git a/vendor/github.com/go-ole/go-ole/idispatch_windows.go b/vendor/github.com/go-ole/go-ole/idispatch_windows.go
index 6ec180b55f..b399f04791 100644
--- a/vendor/github.com/go-ole/go-ole/idispatch_windows.go
+++ b/vendor/github.com/go-ole/go-ole/idispatch_windows.go
@@ -185,7 +185,9 @@ func invoke(disp *IDispatch, dispid int32, dispatch int16, params ...interface{}
uintptr(unsafe.Pointer(&excepInfo)),
0)
if hr != 0 {
- err = NewErrorWithSubError(hr, BstrToString(excepInfo.bstrDescription), excepInfo)
+ excepInfo.renderStrings()
+ excepInfo.Clear()
+ err = NewErrorWithSubError(hr, excepInfo.description, excepInfo)
}
for i, varg := range vargs {
n := len(params) - i - 1
diff --git a/vendor/github.com/go-ole/go-ole/ole.go b/vendor/github.com/go-ole/go-ole/ole.go
index e2ae4f4bbf..dbd132bbd7 100644
--- a/vendor/github.com/go-ole/go-ole/ole.go
+++ b/vendor/github.com/go-ole/go-ole/ole.go
@@ -3,6 +3,7 @@ package ole
import (
"fmt"
"strings"
+ "unsafe"
)
// DISPPARAMS are the arguments that passed to methods or property.
@@ -24,6 +25,56 @@ type EXCEPINFO struct {
pvReserved uintptr
pfnDeferredFillIn uintptr
scode uint32
+
+	// Go-specific part. Don't move this section up, as that would break the
+	// structure layout for native code.
+ rendered bool
+ source string
+ description string
+ helpFile string
+}
+
+// renderStrings translates BSTR strings to Go ones so `.Error` and `.String`
+// can be safely called after `.Clear`. We need this when we can't rely on
+// a caller to call `.Clear`.
+func (e *EXCEPINFO) renderStrings() {
+ e.rendered = true
+ if e.bstrSource == nil {
+ e.source = ""
+ } else {
+ e.source = BstrToString(e.bstrSource)
+ }
+ if e.bstrDescription == nil {
+ e.description = ""
+ } else {
+ e.description = BstrToString(e.bstrDescription)
+ }
+ if e.bstrHelpFile == nil {
+ e.helpFile = ""
+ } else {
+ e.helpFile = BstrToString(e.bstrHelpFile)
+ }
+}
+
+// Clear frees the BSTR strings inside an EXCEPINFO and sets them to NULL.
+func (e *EXCEPINFO) Clear() {
+ freeBSTR := func(s *uint16) {
+		// SysFreeString doesn't return an error and is safe to call on NULL.
+ // https://docs.microsoft.com/en-us/windows/win32/api/oleauto/nf-oleauto-sysfreestring
+ _ = SysFreeString((*int16)(unsafe.Pointer(s)))
+ }
+
+ if e.bstrSource != nil {
+ freeBSTR(e.bstrSource)
+ e.bstrSource = nil
+ }
+ if e.bstrDescription != nil {
+ freeBSTR(e.bstrDescription)
+ e.bstrDescription = nil
+ }
+ if e.bstrHelpFile != nil {
+ freeBSTR(e.bstrHelpFile)
+ e.bstrHelpFile = nil
+ }
}
// WCode return wCode in EXCEPINFO.
@@ -38,48 +89,30 @@ func (e EXCEPINFO) SCODE() uint32 {
// String convert EXCEPINFO to string.
func (e EXCEPINFO) String() string {
- var src, desc, hlp string
- if e.bstrSource == nil {
- src = ""
- } else {
- src = BstrToString(e.bstrSource)
- }
-
- if e.bstrDescription == nil {
- desc = ""
- } else {
- desc = BstrToString(e.bstrDescription)
+ if !e.rendered {
+ e.renderStrings()
}
-
- if e.bstrHelpFile == nil {
- hlp = ""
- } else {
- hlp = BstrToString(e.bstrHelpFile)
- }
-
return fmt.Sprintf(
"wCode: %#x, bstrSource: %v, bstrDescription: %v, bstrHelpFile: %v, dwHelpContext: %#x, scode: %#x",
- e.wCode, src, desc, hlp, e.dwHelpContext, e.scode,
+ e.wCode, e.source, e.description, e.helpFile, e.dwHelpContext, e.scode,
)
}
// Error implements error interface and returns error string.
func (e EXCEPINFO) Error() string {
- if e.bstrDescription != nil {
- return strings.TrimSpace(BstrToString(e.bstrDescription))
+ if !e.rendered {
+ e.renderStrings()
}
- src := "Unknown"
- if e.bstrSource != nil {
- src = BstrToString(e.bstrSource)
+ if e.description != "" {
+ return strings.TrimSpace(e.description)
}
code := e.scode
if e.wCode != 0 {
code = uint32(e.wCode)
}
-
- return fmt.Sprintf("%v: %#x", src, code)
+ return fmt.Sprintf("%v: %#x", e.source, code)
}
// PARAMDATA defines parameter data type.
diff --git a/vendor/github.com/go-ole/go-ole/safearray_windows.go b/vendor/github.com/go-ole/go-ole/safearray_windows.go
index b48a2394d1..0c1b3a10ff 100644
--- a/vendor/github.com/go-ole/go-ole/safearray_windows.go
+++ b/vendor/github.com/go-ole/go-ole/safearray_windows.go
@@ -7,35 +7,35 @@ import (
)
var (
- procSafeArrayAccessData, _ = modoleaut32.FindProc("SafeArrayAccessData")
- procSafeArrayAllocData, _ = modoleaut32.FindProc("SafeArrayAllocData")
- procSafeArrayAllocDescriptor, _ = modoleaut32.FindProc("SafeArrayAllocDescriptor")
- procSafeArrayAllocDescriptorEx, _ = modoleaut32.FindProc("SafeArrayAllocDescriptorEx")
- procSafeArrayCopy, _ = modoleaut32.FindProc("SafeArrayCopy")
- procSafeArrayCopyData, _ = modoleaut32.FindProc("SafeArrayCopyData")
- procSafeArrayCreate, _ = modoleaut32.FindProc("SafeArrayCreate")
- procSafeArrayCreateEx, _ = modoleaut32.FindProc("SafeArrayCreateEx")
- procSafeArrayCreateVector, _ = modoleaut32.FindProc("SafeArrayCreateVector")
- procSafeArrayCreateVectorEx, _ = modoleaut32.FindProc("SafeArrayCreateVectorEx")
- procSafeArrayDestroy, _ = modoleaut32.FindProc("SafeArrayDestroy")
- procSafeArrayDestroyData, _ = modoleaut32.FindProc("SafeArrayDestroyData")
- procSafeArrayDestroyDescriptor, _ = modoleaut32.FindProc("SafeArrayDestroyDescriptor")
- procSafeArrayGetDim, _ = modoleaut32.FindProc("SafeArrayGetDim")
- procSafeArrayGetElement, _ = modoleaut32.FindProc("SafeArrayGetElement")
- procSafeArrayGetElemsize, _ = modoleaut32.FindProc("SafeArrayGetElemsize")
- procSafeArrayGetIID, _ = modoleaut32.FindProc("SafeArrayGetIID")
- procSafeArrayGetLBound, _ = modoleaut32.FindProc("SafeArrayGetLBound")
- procSafeArrayGetUBound, _ = modoleaut32.FindProc("SafeArrayGetUBound")
- procSafeArrayGetVartype, _ = modoleaut32.FindProc("SafeArrayGetVartype")
- procSafeArrayLock, _ = modoleaut32.FindProc("SafeArrayLock")
- procSafeArrayPtrOfIndex, _ = modoleaut32.FindProc("SafeArrayPtrOfIndex")
- procSafeArrayUnaccessData, _ = modoleaut32.FindProc("SafeArrayUnaccessData")
- procSafeArrayUnlock, _ = modoleaut32.FindProc("SafeArrayUnlock")
- procSafeArrayPutElement, _ = modoleaut32.FindProc("SafeArrayPutElement")
- //procSafeArrayRedim, _ = modoleaut32.FindProc("SafeArrayRedim") // TODO
- //procSafeArraySetIID, _ = modoleaut32.FindProc("SafeArraySetIID") // TODO
- procSafeArrayGetRecordInfo, _ = modoleaut32.FindProc("SafeArrayGetRecordInfo")
- procSafeArraySetRecordInfo, _ = modoleaut32.FindProc("SafeArraySetRecordInfo")
+ procSafeArrayAccessData = modoleaut32.NewProc("SafeArrayAccessData")
+ procSafeArrayAllocData = modoleaut32.NewProc("SafeArrayAllocData")
+ procSafeArrayAllocDescriptor = modoleaut32.NewProc("SafeArrayAllocDescriptor")
+ procSafeArrayAllocDescriptorEx = modoleaut32.NewProc("SafeArrayAllocDescriptorEx")
+ procSafeArrayCopy = modoleaut32.NewProc("SafeArrayCopy")
+ procSafeArrayCopyData = modoleaut32.NewProc("SafeArrayCopyData")
+ procSafeArrayCreate = modoleaut32.NewProc("SafeArrayCreate")
+ procSafeArrayCreateEx = modoleaut32.NewProc("SafeArrayCreateEx")
+ procSafeArrayCreateVector = modoleaut32.NewProc("SafeArrayCreateVector")
+ procSafeArrayCreateVectorEx = modoleaut32.NewProc("SafeArrayCreateVectorEx")
+ procSafeArrayDestroy = modoleaut32.NewProc("SafeArrayDestroy")
+ procSafeArrayDestroyData = modoleaut32.NewProc("SafeArrayDestroyData")
+ procSafeArrayDestroyDescriptor = modoleaut32.NewProc("SafeArrayDestroyDescriptor")
+ procSafeArrayGetDim = modoleaut32.NewProc("SafeArrayGetDim")
+ procSafeArrayGetElement = modoleaut32.NewProc("SafeArrayGetElement")
+ procSafeArrayGetElemsize = modoleaut32.NewProc("SafeArrayGetElemsize")
+ procSafeArrayGetIID = modoleaut32.NewProc("SafeArrayGetIID")
+ procSafeArrayGetLBound = modoleaut32.NewProc("SafeArrayGetLBound")
+ procSafeArrayGetUBound = modoleaut32.NewProc("SafeArrayGetUBound")
+ procSafeArrayGetVartype = modoleaut32.NewProc("SafeArrayGetVartype")
+ procSafeArrayLock = modoleaut32.NewProc("SafeArrayLock")
+ procSafeArrayPtrOfIndex = modoleaut32.NewProc("SafeArrayPtrOfIndex")
+ procSafeArrayUnaccessData = modoleaut32.NewProc("SafeArrayUnaccessData")
+ procSafeArrayUnlock = modoleaut32.NewProc("SafeArrayUnlock")
+ procSafeArrayPutElement = modoleaut32.NewProc("SafeArrayPutElement")
+ //procSafeArrayRedim = modoleaut32.NewProc("SafeArrayRedim") // TODO
+ //procSafeArraySetIID = modoleaut32.NewProc("SafeArraySetIID") // TODO
+ procSafeArrayGetRecordInfo = modoleaut32.NewProc("SafeArrayGetRecordInfo")
+ procSafeArraySetRecordInfo = modoleaut32.NewProc("SafeArraySetRecordInfo")
)
// safeArrayAccessData returns raw array pointer.
diff --git a/vendor/github.com/go-ole/go-ole/safearrayconversion.go b/vendor/github.com/go-ole/go-ole/safearrayconversion.go
index 259f488ec7..da737293d7 100644
--- a/vendor/github.com/go-ole/go-ole/safearrayconversion.go
+++ b/vendor/github.com/go-ole/go-ole/safearrayconversion.go
@@ -84,13 +84,13 @@ func (sac *SafeArrayConversion) ToValueArray() (values []interface{}) {
safeArrayGetElement(sac.Array, i, unsafe.Pointer(&v))
values[i] = v
case VT_BSTR:
- var v string
- safeArrayGetElement(sac.Array, i, unsafe.Pointer(&v))
+			v, _ := safeArrayGetElementString(sac.Array, i)
values[i] = v
case VT_VARIANT:
var v VARIANT
safeArrayGetElement(sac.Array, i, unsafe.Pointer(&v))
values[i] = v.Value()
+ v.Clear()
default:
// TODO
}
diff --git a/vendor/github.com/go-ole/go-ole/variables.go b/vendor/github.com/go-ole/go-ole/variables.go
index ebe00f1cfc..a6add1b006 100644
--- a/vendor/github.com/go-ole/go-ole/variables.go
+++ b/vendor/github.com/go-ole/go-ole/variables.go
@@ -3,14 +3,13 @@
package ole
import (
- "syscall"
+ "golang.org/x/sys/windows"
)
var (
- modcombase = syscall.NewLazyDLL("combase.dll")
- modkernel32, _ = syscall.LoadDLL("kernel32.dll")
- modole32, _ = syscall.LoadDLL("ole32.dll")
- modoleaut32, _ = syscall.LoadDLL("oleaut32.dll")
- modmsvcrt, _ = syscall.LoadDLL("msvcrt.dll")
- moduser32, _ = syscall.LoadDLL("user32.dll")
+ modcombase = windows.NewLazySystemDLL("combase.dll")
+ modkernel32 = windows.NewLazySystemDLL("kernel32.dll")
+ modole32 = windows.NewLazySystemDLL("ole32.dll")
+ modoleaut32 = windows.NewLazySystemDLL("oleaut32.dll")
+ moduser32 = windows.NewLazySystemDLL("user32.dll")
)
diff --git a/vendor/github.com/go-ole/go-ole/variant_arm.go b/vendor/github.com/go-ole/go-ole/variant_arm.go
new file mode 100644
index 0000000000..d472454443
--- /dev/null
+++ b/vendor/github.com/go-ole/go-ole/variant_arm.go
@@ -0,0 +1,11 @@
+//go:build arm
+// +build arm
+
+package ole
+
+type VARIANT struct {
+ VT VT // 2
+ wReserved1 uint16 // 4
+ wReserved2 uint16 // 6
+ wReserved3 uint16 // 8
+ Val int64 // 16
+}
diff --git a/vendor/github.com/go-ole/go-ole/variant_arm64.go b/vendor/github.com/go-ole/go-ole/variant_arm64.go
new file mode 100644
index 0000000000..78473cec4f
--- /dev/null
+++ b/vendor/github.com/go-ole/go-ole/variant_arm64.go
@@ -0,0 +1,13 @@
+//go:build arm64
+// +build arm64
+
+package ole
+
+type VARIANT struct {
+ VT VT // 2
+ wReserved1 uint16 // 4
+ wReserved2 uint16 // 6
+ wReserved3 uint16 // 8
+ Val int64 // 16
+ _ [8]byte // 24
+}
diff --git a/vendor/github.com/go-ole/go-ole/variant_date_arm.go b/vendor/github.com/go-ole/go-ole/variant_date_arm.go
new file mode 100644
index 0000000000..09ec7b5cfd
--- /dev/null
+++ b/vendor/github.com/go-ole/go-ole/variant_date_arm.go
@@ -0,0 +1,22 @@
+//go:build windows && arm
+// +build windows,arm
+
+package ole
+
+import (
+ "errors"
+ "syscall"
+ "time"
+ "unsafe"
+)
+
+// GetVariantDate converts COM Variant Time value to Go time.Time.
+func GetVariantDate(value uint64) (time.Time, error) {
+ var st syscall.Systemtime
+ v1 := uint32(value)
+ v2 := uint32(value >> 32)
+ r, _, _ := procVariantTimeToSystemTime.Call(uintptr(v1), uintptr(v2), uintptr(unsafe.Pointer(&st)))
+ if r != 0 {
+		// time.Date expects nanoseconds; SYSTEMTIME carries milliseconds.
+		return time.Date(int(st.Year), time.Month(st.Month), int(st.Day), int(st.Hour), int(st.Minute), int(st.Second), int(st.Milliseconds)*1000000, time.UTC), nil
+ }
+ return time.Now(), errors.New("Could not convert to time, passing current time.")
+}
diff --git a/vendor/github.com/go-ole/go-ole/variant_date_arm64.go b/vendor/github.com/go-ole/go-ole/variant_date_arm64.go
new file mode 100644
index 0000000000..02b04a0d4a
--- /dev/null
+++ b/vendor/github.com/go-ole/go-ole/variant_date_arm64.go
@@ -0,0 +1,23 @@
+//go:build windows && arm64
+// +build windows,arm64
+
+package ole
+
+import (
+ "errors"
+ "syscall"
+ "time"
+ "unsafe"
+)
+
+// GetVariantDate converts COM Variant Time value to Go time.Time.
+func GetVariantDate(value uint64) (time.Time, error) {
+ var st syscall.Systemtime
+ v1 := uint32(value)
+ v2 := uint32(value >> 32)
+ r, _, _ := procVariantTimeToSystemTime.Call(uintptr(v1), uintptr(v2), uintptr(unsafe.Pointer(&st)))
+ if r != 0 {
+		// time.Date expects nanoseconds; SYSTEMTIME carries milliseconds.
+		return time.Date(int(st.Year), time.Month(st.Month), int(st.Day), int(st.Hour), int(st.Minute), int(st.Second), int(st.Milliseconds)*1000000, time.UTC), nil
+ }
+ return time.Now(), errors.New("Could not convert to time, passing current time.")
+}
diff --git a/vendor/github.com/jaypipes/ghw/.gitignore b/vendor/github.com/jaypipes/ghw/.gitignore
new file mode 100644
index 0000000000..34d0d840aa
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/.gitignore
@@ -0,0 +1,3 @@
+vendor/
+coverage*.*
+*~
diff --git a/vendor/github.com/jaypipes/ghw/CODE_OF_CONDUCT.md b/vendor/github.com/jaypipes/ghw/CODE_OF_CONDUCT.md
new file mode 100644
index 0000000000..a4b377145d
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/CODE_OF_CONDUCT.md
@@ -0,0 +1,134 @@
+
+# Contributor Covenant Code of Conduct
+
+## Our Pledge
+
+We as members, contributors, and leaders pledge to make participation in our
+community a harassment-free experience for everyone, regardless of age, body
+size, visible or invisible disability, ethnicity, sex characteristics, gender
+identity and expression, level of experience, education, socio-economic status,
+nationality, personal appearance, race, caste, color, religion, or sexual identity
+and orientation.
+
+We pledge to act and interact in ways that contribute to an open, welcoming,
+diverse, inclusive, and healthy community.
+
+## Our Standards
+
+Examples of behavior that contributes to a positive environment for our
+community include:
+
+* Demonstrating empathy and kindness toward other people
+* Being respectful of differing opinions, viewpoints, and experiences
+* Giving and gracefully accepting constructive feedback
+* Accepting responsibility and apologizing to those affected by our mistakes,
+ and learning from the experience
+* Focusing on what is best not just for us as individuals, but for the
+ overall community
+
+Examples of unacceptable behavior include:
+
+* The use of sexualized language or imagery, and sexual attention or
+ advances of any kind
+* Trolling, insulting or derogatory comments, and personal or political attacks
+* Public or private harassment
+* Publishing others' private information, such as a physical or email
+ address, without their explicit permission
+* Other conduct which could reasonably be considered inappropriate in a
+ professional setting
+
+## Enforcement Responsibilities
+
+Community leaders are responsible for clarifying and enforcing our standards of
+acceptable behavior and will take appropriate and fair corrective action in
+response to any behavior that they deem inappropriate, threatening, offensive,
+or harmful.
+
+Community leaders have the right and responsibility to remove, edit, or reject
+comments, commits, code, wiki edits, issues, and other contributions that are
+not aligned to this Code of Conduct, and will communicate reasons for moderation
+decisions when appropriate.
+
+## Scope
+
+This Code of Conduct applies within all community spaces, and also applies when
+an individual is officially representing the community in public spaces.
+Examples of representing our community include using an official e-mail address,
+posting via an official social media account, or acting as an appointed
+representative at an online or offline event.
+
+## Enforcement
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be
+reported to the community leaders responsible for enforcement at
+[INSERT CONTACT METHOD].
+All complaints will be reviewed and investigated promptly and fairly.
+
+All community leaders are obligated to respect the privacy and security of the
+reporter of any incident.
+
+## Enforcement Guidelines
+
+Community leaders will follow these Community Impact Guidelines in determining
+the consequences for any action they deem in violation of this Code of Conduct:
+
+### 1. Correction
+
+**Community Impact**: Use of inappropriate language or other behavior deemed
+unprofessional or unwelcome in the community.
+
+**Consequence**: A private, written warning from community leaders, providing
+clarity around the nature of the violation and an explanation of why the
+behavior was inappropriate. A public apology may be requested.
+
+### 2. Warning
+
+**Community Impact**: A violation through a single incident or series
+of actions.
+
+**Consequence**: A warning with consequences for continued behavior. No
+interaction with the people involved, including unsolicited interaction with
+those enforcing the Code of Conduct, for a specified period of time. This
+includes avoiding interactions in community spaces as well as external channels
+like social media. Violating these terms may lead to a temporary or
+permanent ban.
+
+### 3. Temporary Ban
+
+**Community Impact**: A serious violation of community standards, including
+sustained inappropriate behavior.
+
+**Consequence**: A temporary ban from any sort of interaction or public
+communication with the community for a specified period of time. No public or
+private interaction with the people involved, including unsolicited interaction
+with those enforcing the Code of Conduct, is allowed during this period.
+Violating these terms may lead to a permanent ban.
+
+### 4. Permanent Ban
+
+**Community Impact**: Demonstrating a pattern of violation of community
+standards, including sustained inappropriate behavior, harassment of an
+individual, or aggression toward or disparagement of classes of individuals.
+
+**Consequence**: A permanent ban from any sort of public interaction within
+the community.
+
+## Attribution
+
+This Code of Conduct is adapted from the [Contributor Covenant][homepage],
+version 2.1, available at
+[https://www.contributor-covenant.org/version/2/1/code_of_conduct.html][v2.1].
+
+Community Impact Guidelines were inspired by
+[Mozilla's code of conduct enforcement ladder][Mozilla CoC].
+
+For answers to common questions about this code of conduct, see the FAQ at
+[https://www.contributor-covenant.org/faq][FAQ]. Translations are available
+at [https://www.contributor-covenant.org/translations][translations].
+
+[homepage]: https://www.contributor-covenant.org
+[v2.1]: https://www.contributor-covenant.org/version/2/1/code_of_conduct.html
+[Mozilla CoC]: https://github.com/mozilla/diversity
+[FAQ]: https://www.contributor-covenant.org/faq
+[translations]: https://www.contributor-covenant.org/translations
+
diff --git a/vendor/github.com/jaypipes/ghw/CONTRIBUTING.md b/vendor/github.com/jaypipes/ghw/CONTRIBUTING.md
new file mode 100644
index 0000000000..b790517be6
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/CONTRIBUTING.md
@@ -0,0 +1,54 @@
+# How to Contribute
+
+We welcome any and all contributions to `ghw`! Filing [bug reports][gh-issues],
+asking questions and submitting patches are all encouraged.
+
+[gh-issues]: https://github.com/jaypipes/ghw/issues
+
+## Submitting patches via pull requests
+
+We use GitHub pull requests to review code submissions.
+
+Consult [GitHub Help][pr-help] for more information on using pull requests.
+
+[pr-help]: https://help.github.com/articles/about-pull-requests/
+
+We ask that contributors submitting a pull request sign their commits and
+attest to the Developer Certificate of Origin (DCO).
+
+## Developer Certificate of Origin
+
+The DCO is a lightweight way for contributors to certify that they wrote or
+otherwise have the right to submit the code they are contributing to the
+project. Here is the [full text of the DCO][dco], reformatted for readability:
+
+> By making a contribution to this project, I certify that:
+>
+> a. The contribution was created in whole or in part by me and I have the
+> right to submit it under the open source license indicated in the file; or
+>
+> b. The contribution is based upon previous work that, to the best of my
+> knowledge, is covered under an appropriate open source license and I have the
+> right under that license to submit that work with modifications, whether
+> created in whole or in part by me, under the same open source license (unless
+> I am permitted to submit under a different license), as indicated in the
+> file; or
+>
+> c. The contribution was provided directly to me by some other person who
+> certified (a), (b) or (c) and I have not modified it.
+>
+> d. I understand and agree that this project and the contribution are public
+> and that a record of the contribution (including all personal information I
+> submit with it, including my sign-off) is maintained indefinitely and may be
+> redistributed consistent with this project or the open source license(s)
+> involved.
+
+[dco]: https://developercertificate.org/
+
+You can sign your commits using `git commit -s` before pushing commits to
+Github and creating a pull request.
+
+## Community Guidelines
+
+1. Be kind.
+2. Seriously, that's it.
diff --git a/vendor/github.com/jaypipes/ghw/COPYING b/vendor/github.com/jaypipes/ghw/COPYING
new file mode 100644
index 0000000000..68c771a099
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/COPYING
@@ -0,0 +1,176 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
diff --git a/vendor/github.com/jaypipes/ghw/Dockerfile b/vendor/github.com/jaypipes/ghw/Dockerfile
new file mode 100644
index 0000000000..cbd587d6c6
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/Dockerfile
@@ -0,0 +1,26 @@
+FROM golang:1.15-buster as builder
+WORKDIR /go/src/github.com/jaypipes/ghw
+
+# Force the go compiler to use modules.
+ENV GO111MODULE=on
+ENV GOPROXY=direct
+
+# go.mod and go.sum go into their own layers.
+COPY go.mod .
+COPY go.sum .
+
+# This ensures `go mod download` happens only when go.mod and go.sum change.
+RUN go mod download
+
+COPY . .
+
+RUN CGO_ENABLED=0 go build -o ghwc ./cmd/ghwc/
+
+FROM alpine:3.7
+RUN apk add --no-cache ethtool
+
+WORKDIR /bin
+
+COPY --from=builder /go/src/github.com/jaypipes/ghw/ghwc /bin
+
+CMD ghwc
diff --git a/vendor/github.com/jaypipes/ghw/Makefile b/vendor/github.com/jaypipes/ghw/Makefile
new file mode 100644
index 0000000000..c7e0db4020
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/Makefile
@@ -0,0 +1,39 @@
+VENDOR := vendor
+PKGS := $(shell go list ./... | grep -v /$(VENDOR)/)
+SRC = $(shell find . -type f -name '*.go' -not -path "*/$(VENDOR)/*")
+BIN_DIR := $(GOPATH)/bin
+GOMETALINTER := $(BIN_DIR)/gometalinter
+
+.PHONY: test
+test: vet
+ go test $(PKGS)
+
+$(GOMETALINTER):
+ go get -u github.com/alecthomas/gometalinter
+ $(GOMETALINTER) --install &> /dev/null
+
+.PHONY: lint
+lint: $(GOMETALINTER)
+ $(GOMETALINTER) ./... --vendor
+
+.PHONY: fmt
+fmt:
+ @echo "Running gofmt on all sources..."
+ @gofmt -s -l -w $(SRC)
+
+.PHONY: fmtcheck
+fmtcheck:
+ @bash -c "diff -u <(echo -n) <(gofmt -d $(SRC))"
+
+.PHONY: vet
+vet:
+ go vet $(PKGS)
+
+.PHONY: cover
+cover:
+ $(shell [ -e coverage.out ] && rm coverage.out)
+ @echo "mode: count" > coverage-all.out
+ $(foreach pkg,$(PKGS),\
+ go test -coverprofile=coverage.out -covermode=count $(pkg);\
+ tail -n +2 coverage.out >> coverage-all.out;)
+ go tool cover -html=coverage-all.out -o=coverage-all.html
diff --git a/vendor/github.com/jaypipes/ghw/README.md b/vendor/github.com/jaypipes/ghw/README.md
new file mode 100644
index 0000000000..b0c742d279
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/README.md
@@ -0,0 +1,1391 @@
+# `ghw` - Golang HardWare discovery/inspection library
+
+[![Build Status](https://github.com/jaypipes/ghw/actions/workflows/go.yml/badge.svg?branch=main)](https://github.com/jaypipes/ghw/actions)
+[![Go Report Card](https://goreportcard.com/badge/github.com/jaypipes/ghw)](https://goreportcard.com/report/github.com/jaypipes/ghw)
+[![Contributor Covenant](https://img.shields.io/badge/Contributor%20Covenant-2.1-4baaaa.svg)](CODE_OF_CONDUCT.md)
+
+![ghw mascot](images/ghw-gopher.png)
+
+`ghw` is a small Golang library providing hardware inspection and discovery
+for Linux and Windows. There is currently partial support for MacOSX.
+
+## Design Principles
+
+* No root privileges needed for discovery
+
+ `ghw` goes the extra mile to be useful without root privileges. We query for
+ host hardware information as directly as possible without relying on shellouts
+ to programs like `dmidecode` that require root privileges to execute.
+
+ Elevated privileges are indeed required to query for some information, but
+ `ghw` will never error out if blocked from reading that information. Instead,
+ `ghw` will print a warning message about the information that could not be
+ retrieved. You may disable these warning messages with the
+ `GHW_DISABLE_WARNINGS` environment variable.
+
+* Well-documented code and plenty of example code
+
+ The code itself should be well-documented with lots of usage
+ examples.
+
+* Interfaces should be consistent across modules
+
+ Each module in the library should be structured in a consistent fashion, and
+ the structs returned by various library functions should have consistent
+ attribute and method names.
+
+## Inspecting != Monitoring
+
+`ghw` is a tool for gathering information about your hardware's **capacity**
+and **capabilities**.
+
+It is important to point out that `ghw` does **NOT** report information that is
+temporary or variable. It is **NOT** a system monitor nor is it an appropriate
+tool for gathering data points for metrics that change over time. If you are
+looking for a system that tracks usage of CPU, memory, network I/O or disk I/O,
+there are plenty of great open source tools that do this! Check out the
+[Prometheus project](https://prometheus.io/) for a great example.
+
+## Usage
+
+You can use the functions in `ghw` to determine various hardware-related
+information about the host computer:
+
+* [Memory](#memory)
+* [CPU](#cpu)
+* [Block storage](#block-storage)
+* [Topology](#topology)
+* [Network](#network)
+* [PCI](#pci)
+* [GPU](#gpu)
+* [Chassis](#chassis)
+* [BIOS](#bios)
+* [Baseboard](#baseboard)
+* [Product](#product)
+* [YAML and JSON serialization](#serialization)
+
+### Overriding the root mountpoint `ghw` uses
+
+The default root mountpoint that `ghw` uses when looking for information about
+the host system is `/`. So, for example, when looking up CPU information on a
+Linux system, `ghw.CPU()` will use the path `/proc/cpuinfo`.
+
+If you are calling `ghw` from a system that has an alternate root mountpoint,
+you can either set the `GHW_CHROOT` environment variable to that alternate
+path, or call the module constructor function with the `ghw.WithChroot()`
+modifier.
+
+For example, if you are executing from within an application container that has
+bind-mounted the root host filesystem to the mount point `/host`, you would set
+`GHW_CHROOT` to `/host` so that `ghw` can find `/proc/cpuinfo` at
+`/host/proc/cpuinfo`.
+
+Alternately, you can use the `ghw.WithChroot()` function like so:
+
+```go
+cpu, err := ghw.CPU(ghw.WithChroot("/host"))
+```
+
+### Overriding individual mountpoints `ghw` uses
+
+When running inside containers, it can be cumbersome to override just the
+root mountpoint. Inside containers, when granting access to the host
+file systems, it is more common to bind-mount them in non-standard locations,
+like `/sys` on `/host-sys` or `/proc` on `/host-proc`. It is rarer to mount
+them in a common subtree (e.g. `/sys` on `/host/sys` and `/proc` on
+`/host/proc`...).
+
+To better cover this use case, `ghw` allows you to *programmatically* override
+the initial component of filesystem subtrees, allowing access to `sysfs`
+(or `procfs`, or...) mounted in non-standard locations.
+
+
+```go
+cpu, err := ghw.CPU(ghw.WithPathOverrides(ghw.PathOverrides{
+ "/proc": "/host-proc",
+ "/sys": "/host-sys",
+}))
+```
+
+Please note:
+- this feature works in addition to, and is composable with, the
+  `WithChroot`/`GHW_CHROOT` feature.
+- `ghw` does not yet support environment variables to override individual
+  mountpoints, because this could lead to significant proliferation of
+  environment variables.
+
+### Consuming snapshots
+
+You can make `ghw` read from snapshots (created with `ghw-snapshot`) using
+environment variables or programmatically.
+Please check `SNAPSHOT.md` to learn more about how ghw creates and consumes
+snapshots.
+
+The environment variable `GHW_SNAPSHOT_PATH` lets users specify a snapshot
+that `ghw` will automatically consume. All the needed chroot changes will be
+automatically performed. By default, the snapshot is unpacked into a temporary
+directory managed by `ghw`, and cleaned up when no longer needed, avoiding
+leftovers.
+
+The rest of the environment variables are relevant only if `GHW_SNAPSHOT_PATH`
+is given. `GHW_SNAPSHOT_ROOT` lets users specify the directory into which the
+snapshot should be unpacked. This moves the ownership of that directory from
+`ghw` to users. For this reason, `ghw` will *not* automatically clean up the
+content unpacked into `GHW_SNAPSHOT_ROOT`.
+
+`GHW_SNAPSHOT_EXCLUSIVE` is relevant only if `GHW_SNAPSHOT_ROOT` is given.
+Set it to any value to toggle it on. This tells `ghw` that the directory is
+meant only to contain the given snapshot, so `ghw` will *not* attempt to
+unpack it (and will carry on silently!) unless the directory is empty.
+You can use both `GHW_SNAPSHOT_ROOT` and `GHW_SNAPSHOT_EXCLUSIVE` to make sure
+`ghw` unpacks the snapshot only once regardless of how many `ghw` packages
+(e.g. cpu, memory) access it.
+
+Set `GHW_SNAPSHOT_PRESERVE` to any value to enable it. If set, `ghw` will *not*
+clean up the unpacked snapshot once done, leaving it on the system.
+
+```go
+cpu, err := ghw.CPU(ghw.WithSnapshot(ghw.SnapshotOptions{
+ Path: "/path/to/linux-amd64-d4771ed3300339bc75f856be09fc6430.tar.gz",
+}))
+
+
+myRoot := "/my/safe/directory"
+cpu, err := ghw.CPU(ghw.WithSnapshot(ghw.SnapshotOptions{
+ Path: "/path/to/linux-amd64-d4771ed3300339bc75f856be09fc6430.tar.gz",
+ Root: &myRoot,
+}))
+
+myOtherRoot := "/my/other/safe/directory"
+cpu, err := ghw.CPU(ghw.WithSnapshot(ghw.SnapshotOptions{
+ Path: "/path/to/linux-amd64-d4771ed3300339bc75f856be09fc6430.tar.gz",
+ Root: &myOtherRoot,
+ Exclusive: true,
+}))
+```
+
+### Creating snapshots
+
+You can create ghw snapshots in two ways.
+You can just use the `ghw-snapshot` tool, or you can create them
+programmatically from your Go code. We now explore the latter case.
+
+Snapshotting takes two phases:
+1. Clone the relevant pseudofiles/pseudodirectories into a temporary tree.
+   This tree is usually deleted once the packing is successful.
+2. Pack the cloned tree into a tar.gz.
+
+```go
+
+import (
+ "fmt"
+ "io/ioutil"
+ "os"
+
+ "github.com/jaypipes/ghw/pkg/snapshot"
+)
+
+// ...
+
+scratchDir, err := ioutil.TempDir("", "ghw-snapshot-*")
+if err != nil {
+ fmt.Printf("Error creating clone directory: %v", err)
+}
+defer os.RemoveAll(scratchDir)
+
+// this step clones all the files and directories ghw cares about
+if err := snapshot.CloneTreeInto(scratchDir); err != nil {
+ fmt.Printf("error cloning into %q: %v", scratchDir, err)
+}
+
+// optionally, you may add extra content into your snapshot.
+// ghw will ignore the extra content.
+// Glob patterns like `filepath.Glob` are supported.
+fileSpecs := []string{
+ "/proc/cmdline",
+}
+
+// options allows the client code to optionally dereference symlinks, or copy
+// them into the cloned tree as symlinks
+var opts *snapshot.CopyFileOptions
+if err := snapshot.CopyFilesInto(fileSpecs, scratchDir, opts); err != nil {
+ fmt.Printf("error cloning extra files into %q: %v", scratchDir, err)
+}
+
+// automates the creation of the gzipped tarball out of the given tree.
+if err := snapshot.PackFrom("my-snapshot.tgz", scratchDir); err != nil {
+ fmt.Printf("error packing %q into %q: %v", scratchDir, *output, err)
+}
+```
+
+### Disabling warning messages
+
+When `ghw` isn't able to retrieve some information, it may print certain
+warning messages to `stderr`. To disable these warnings, simply set the
+`GHW_DISABLE_WARNINGS` environment variable:
+
+```
+$ ghwc memory
+WARNING:
+Could not determine total physical bytes of memory. This may
+be due to the host being a virtual machine or container with no
+/var/log/syslog file, or the current user may not have necessary
+privileges to read the syslog. We are falling back to setting the
+total physical amount of memory to the total usable amount of memory
+memory (24GB physical, 24GB usable)
+```
+
+```
+$ GHW_DISABLE_WARNINGS=1 ghwc memory
+memory (24GB physical, 24GB usable)
+```
+
+You can disable warnings programmatically using the `WithDisableWarnings` option:
+
+```go
+
+import (
+ "github.com/jaypipes/ghw"
+)
+
+mem, err := ghw.Memory(ghw.WithDisableWarnings())
+```
+
+`WithDisableWarnings` is an alias for the `WithNullAlerter` option, which in turn
+leverages the more general `Alerter` feature of ghw.
+
+You may supply an `Alerter` to ghw to redirect all warnings to it, for example
+a logger object (see golang's stdlib `log.Logger`).
+`Alerter` is, in fact, the minimal logging interface `ghw` needs.
+To learn more, please check the `option.Alerter` interface and the `ghw.WithAlerter()`
+function.
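+
+For instance, since golang's stdlib `log.Logger` exposes a `Printf` method, it
+should satisfy the `option.Alerter` interface; here is a minimal sketch under
+that assumption:
+
+```go
+package main
+
+import (
+	"fmt"
+	"log"
+	"os"
+
+	"github.com/jaypipes/ghw"
+)
+
+func main() {
+	// route ghw warnings through a standard library logger with a
+	// "ghw: " prefix, instead of the default raw stderr output
+	alerter := log.New(os.Stderr, "ghw: ", log.LstdFlags)
+
+	mem, err := ghw.Memory(ghw.WithAlerter(alerter))
+	if err != nil {
+		fmt.Printf("Error getting memory info: %v", err)
+		return
+	}
+	fmt.Println(mem.String())
+}
+```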
+
+### Memory
+
+The basic building block of the memory support in ghw is the `ghw.MemoryArea` struct.
+A "memory area" is a block of memory which share common properties. In the simplest
+case, the whole system memory fits in a single memory area; in more complex scenarios,
+like multi-NUMA systems, many memory areas may be present in the system (e.g. one for
+each NUMA cell).
+
+The `ghw.MemoryArea` struct contains the following fields:
+
+* `ghw.MemoryArea.TotalPhysicalBytes` contains the amount of physical memory on
+  the host
+* `ghw.MemoryArea.TotalUsableBytes` contains the amount of memory the
+  system can actually use. Usable memory accounts for things like the kernel's
+  resident memory size and some reserved system bits
+
+Information about the host computer's memory can be retrieved using the
+`ghw.Memory()` function which returns a pointer to a `ghw.MemoryInfo` struct.
+`ghw.MemoryInfo` is a superset of `ghw.MemoryArea`. Thus, it contains all the
+fields found in `ghw.MemoryArea` (replicated here for clarity) plus some additions:
+
+* `ghw.MemoryInfo.TotalPhysicalBytes` contains the amount of physical memory on
+ the host
+* `ghw.MemoryInfo.TotalUsableBytes` contains the amount of memory the
+ system can actually use. Usable memory accounts for things like the kernel's
+ resident memory size and some reserved system bits
+* `ghw.MemoryInfo.SupportedPageSizes` is an array of integers representing the
+ size, in bytes, of memory pages the system supports
+* `ghw.MemoryInfo.Modules` is an array of pointers to `ghw.MemoryModule`
+ structs, one for each physical [DIMM](https://en.wikipedia.org/wiki/DIMM).
+ Currently, this information is only included on Windows, with Linux support
+ [planned](https://github.com/jaypipes/ghw/pull/171#issuecomment-597082409).
+
+```go
+package main
+
+import (
+ "fmt"
+
+ "github.com/jaypipes/ghw"
+)
+
+func main() {
+ memory, err := ghw.Memory()
+ if err != nil {
+ fmt.Printf("Error getting memory info: %v", err)
+ }
+
+ fmt.Println(memory.String())
+}
+```
+
+Example output from my personal workstation:
+
+```
+memory (24GB physical, 24GB usable)
+```
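+
+The `String()` output above is only a summary; the individual fields listed
+earlier can be read directly. For example, a short sketch printing the
+supported page sizes:
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/jaypipes/ghw"
+)
+
+func main() {
+	memory, err := ghw.Memory()
+	if err != nil {
+		fmt.Printf("Error getting memory info: %v", err)
+		return
+	}
+
+	// SupportedPageSizes holds the page sizes, in bytes, the system supports
+	for _, size := range memory.SupportedPageSizes {
+		fmt.Printf("supported page size: %d bytes\n", size)
+	}
+}
+```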
+
+#### Physical versus Usable Memory
+
+There has been [some](https://github.com/jaypipes/ghw/pull/171)
+[confusion](https://github.com/jaypipes/ghw/issues/183) regarding the
+difference between the total physical bytes versus total usable bytes of
+memory.
+
+Some of this confusion has been due to a misunderstanding of the term "usable".
+As mentioned [above](#inspection!=monitoring), `ghw` does inspection of the
+system's capacity.
+
+A host computer has two capacities when it comes to RAM. The first capacity is
+the amount of RAM that is contained in all memory banks (DIMMs) that are
+attached to the motherboard. `ghw.MemoryInfo.TotalPhysicalBytes` refers to this
+first capacity.
+
+There is a (usually small) amount of RAM that is consumed by the bootloader
+before the operating system is started (booted). Once the bootloader has booted
+the operating system, the amount of RAM that may be used by the operating
+system and its applications is fixed. `ghw.MemoryInfo.TotalUsableBytes` refers
+to this second capacity.
+
+You can determine the amount of RAM that the bootloader used (that is not made
+available to the operating system) by subtracting
+`ghw.MemoryInfo.TotalUsableBytes` from `ghw.MemoryInfo.TotalPhysicalBytes`:
+
+```go
+package main
+
+import (
+ "fmt"
+
+ "github.com/jaypipes/ghw"
+)
+
+func main() {
+ memory, err := ghw.Memory()
+ if err != nil {
+ fmt.Printf("Error getting memory info: %v", err)
+ }
+
+ phys := memory.TotalPhysicalBytes
+ usable := memory.TotalUsableBytes
+
+ fmt.Printf("The bootloader consumes %d bytes of RAM\n", phys - usable)
+}
+```
+
+Example output from my personal workstation booted into a Windows 10 operating
+system with a Linux GRUB bootloader:
+
+```
+The bootloader consumes 3832720 bytes of RAM
+```
+
+### CPU
+
+The `ghw.CPU()` function returns a `ghw.CPUInfo` struct that contains
+information about the CPUs on the host system.
+
+`ghw.CPUInfo` contains the following fields:
+
+* `ghw.CPUInfo.TotalCores` has the total number of physical cores the host
+ system contains
+* `ghw.CPUInfo.TotalThreads` has the total number of hardware threads the
+ host system contains
+* `ghw.CPUInfo.Processors` is an array of `ghw.Processor` structs, one for each
+ physical processor package contained in the host
+
+Each `ghw.Processor` struct contains a number of fields:
+
+* `ghw.Processor.ID` is the physical processor `uint32` ID according to the
+ system
+* `ghw.Processor.NumCores` is the number of physical cores in the processor
+ package
+* `ghw.Processor.NumThreads` is the number of hardware threads in the processor
+ package
+* `ghw.Processor.Vendor` is a string containing the vendor name
+* `ghw.Processor.Model` is a string containing the vendor's model name
+* `ghw.Processor.Capabilities` is an array of strings indicating the features
+ the processor has enabled
+* `ghw.Processor.Cores` is an array of `ghw.ProcessorCore` structs that are
+ packed onto this physical processor
+
+A `ghw.ProcessorCore` has the following fields:
+
+* `ghw.ProcessorCore.ID` is the `uint32` identifier that the host gave this
+ core. Note that this does *not* necessarily equate to a zero-based index of
+ the core within a physical package. For example, the core IDs for an Intel Core
+ i7 are 0, 1, 2, 8, 9, and 10
+* `ghw.ProcessorCore.Index` is the zero-based index of the core on the physical
+ processor package
+* `ghw.ProcessorCore.NumThreads` is the number of hardware threads associated
+ with the core
+* `ghw.ProcessorCore.LogicalProcessors` is an array of logical processor IDs
+ assigned to any processing unit for the core
+
+```go
+package main
+
+import (
+	"fmt"
+	"math"
+	"strings"
+
+	"github.com/jaypipes/ghw"
+)
+
+func main() {
+	cpu, err := ghw.CPU()
+	if err != nil {
+		fmt.Printf("Error getting CPU info: %v", err)
+	}
+
+	fmt.Printf("%v\n", cpu)
+
+	for _, proc := range cpu.Processors {
+		fmt.Printf(" %v\n", proc)
+		for _, core := range proc.Cores {
+			fmt.Printf("  %v\n", core)
+		}
+		if len(proc.Capabilities) > 0 {
+			// pretty-print the (large) block of capability strings into rows
+			// of 6 capability strings
+			rows := int(math.Ceil(float64(len(proc.Capabilities)) / float64(6)))
+			for row := 0; row < rows; row++ {
+				rowStart := row * 6
+				rowEnd := int(math.Min(float64(rowStart+6), float64(len(proc.Capabilities))))
+				rowElems := proc.Capabilities[rowStart:rowEnd]
+				capStr := strings.Join(rowElems, " ")
+				if row == 0 && rows == 1 {
+					fmt.Printf("  capabilities: [%s]\n", capStr)
+				} else if row == 0 {
+					fmt.Printf("  capabilities: [%s\n", capStr)
+				} else if rowEnd < len(proc.Capabilities) {
+					fmt.Printf("                 %s\n", capStr)
+				} else {
+					fmt.Printf("                 %s]\n", capStr)
+				}
+			}
+		}
+	}
+}
+```
+
+Example output from my personal workstation:
+
+```
+cpu (1 physical package, 6 cores, 12 hardware threads)
+ physical package #0 (6 cores, 12 hardware threads)
+ processor core #0 (2 threads), logical processors [0 6]
+ processor core #1 (2 threads), logical processors [1 7]
+ processor core #2 (2 threads), logical processors [2 8]
+ processor core #3 (2 threads), logical processors [3 9]
+ processor core #4 (2 threads), logical processors [4 10]
+ processor core #5 (2 threads), logical processors [5 11]
+ capabilities: [msr pae mce cx8 apic sep
+ mtrr pge mca cmov pat pse36
+ clflush dts acpi mmx fxsr sse
+ sse2 ss ht tm pbe syscall
+ nx pdpe1gb rdtscp lm constant_tsc arch_perfmon
+ pebs bts rep_good nopl xtopology nonstop_tsc
+ cpuid aperfmperf pni pclmulqdq dtes64 monitor
+ ds_cpl vmx est tm2 ssse3 cx16
+ xtpr pdcm pcid sse4_1 sse4_2 popcnt
+ aes lahf_lm pti retpoline tpr_shadow vnmi
+ flexpriority ept vpid dtherm ida arat]
+```
+
+### Block storage
+
+Information about the host computer's local block storage is returned from the
+`ghw.Block()` function. This function returns a pointer to a `ghw.BlockInfo`
+struct.
+
+The `ghw.BlockInfo` struct contains two fields:
+
+* `ghw.BlockInfo.TotalPhysicalBytes` contains the amount of physical block
+ storage on the host
+* `ghw.BlockInfo.Disks` is an array of pointers to `ghw.Disk` structs, one for
+ each disk drive found by the system
+
+Each `ghw.Disk` struct contains the following fields:
+
+* `ghw.Disk.Name` contains a string with the short name of the disk, e.g. "sda"
+* `ghw.Disk.SizeBytes` contains the amount of storage the disk provides
+* `ghw.Disk.PhysicalBlockSizeBytes` contains the size of the physical blocks
+ used on the disk, in bytes
+* `ghw.Disk.IsRemovable` contains a boolean indicating if the disk drive is
+ removable
+* `ghw.Disk.DriveType` is the type of drive. It is of type `ghw.DriveType`
+  which has a `ghw.DriveType.String()` method that can be called to return a
+  string representation of the drive type. This string will be "HDD", "FDD", "ODD",
+  or "SSD", which correspond to a hard disk drive (rotational), floppy drive,
+  optical (CD/DVD) drive and solid-state drive, respectively.
+* `ghw.Disk.StorageController` is the type of storage controller/drive. It is
+  of type `ghw.StorageController` which has a `ghw.StorageController.String()`
+  method that can be called to return a string representation of the storage
+  controller. This string will be "SCSI", "IDE", "virtio", "MMC", or "NVMe"
+* `ghw.Disk.NUMANodeID` is the numeric index of the NUMA node this disk is
+ local to, or -1
+* `ghw.Disk.Vendor` contains a string with the name of the hardware vendor for
+ the disk drive
+* `ghw.Disk.Model` contains a string with the vendor-assigned disk model name
+* `ghw.Disk.SerialNumber` contains a string with the disk's serial number
+* `ghw.Disk.WWN` contains a string with the disk's
+ [World Wide Name](https://en.wikipedia.org/wiki/World_Wide_Name)
+* `ghw.Disk.Partitions` contains an array of pointers to `ghw.Partition`
+ structs, one for each partition on the disk
+
+Each `ghw.Partition` struct contains these fields:
+
+* `ghw.Partition.Name` contains a string with the short name of the partition,
+ e.g. "sda1"
+* `ghw.Partition.SizeBytes` contains the amount of storage the partition
+ provides
+* `ghw.Partition.MountPoint` contains a string with the partition's mount
+ point, or "" if no mount point was discovered
+* `ghw.Partition.Type` contains a string indicating the filesystem type for the
+  partition, or "" if the system could not determine the type
+* `ghw.Partition.IsReadOnly` is a bool indicating whether the partition is read-only
+* `ghw.Partition.Disk` is a pointer to the `ghw.Disk` object associated with
+ the partition. This will be `nil` if the `ghw.Partition` struct was returned
+ by the `ghw.DiskPartitions()` library function.
+* `ghw.Partition.UUID` is a string containing the volume UUID on Linux, the
+  partition UUID on macOS and nothing on Windows.
+
+```go
+package main
+
+import (
+ "fmt"
+
+ "github.com/jaypipes/ghw"
+)
+
+func main() {
+ block, err := ghw.Block()
+ if err != nil {
+ fmt.Printf("Error getting block storage info: %v", err)
+ }
+
+ fmt.Printf("%v\n", block)
+
+ for _, disk := range block.Disks {
+ fmt.Printf(" %v\n", disk)
+ for _, part := range disk.Partitions {
+ fmt.Printf(" %v\n", part)
+ }
+ }
+}
+```
+
+Example output from my personal workstation:
+
+```
+block storage (1 disk, 2TB physical storage)
+ sda HDD (2TB) SCSI [@pci-0000:04:00.0-scsi-0:1:0:0 (node #0)] vendor=LSI model=Logical_Volume serial=600508e000000000f8253aac9a1abd0c WWN=0x600508e000000000f8253aac9a1abd0c
+ /dev/sda1 (100MB)
+ /dev/sda2 (187GB)
+ /dev/sda3 (449MB)
+ /dev/sda4 (1KB)
+ /dev/sda5 (15GB)
+ /dev/sda6 (2TB) [ext4] mounted@/
+```
+
+> Note that `ghw` looks in the udev runtime database for some information. If
+> you are using `ghw` in a container, remember to bind mount `/dev/disk` and
+> `/run` into your container, otherwise `ghw` won't be able to query the udev
+> DB or sysfs paths for information.
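+
+Building on the fields described above, here is a short sketch that walks the
+disk/partition hierarchy to locate the partition mounted at the filesystem
+root:
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/jaypipes/ghw"
+)
+
+func main() {
+	block, err := ghw.Block()
+	if err != nil {
+		fmt.Printf("Error getting block storage info: %v", err)
+		return
+	}
+
+	// walk the Disk -> Partition hierarchy looking for the partition
+	// whose MountPoint is the filesystem root
+	for _, disk := range block.Disks {
+		for _, part := range disk.Partitions {
+			if part.MountPoint == "/" {
+				fmt.Printf("root filesystem: %s on disk %s (%s)\n",
+					part.Name, disk.Name, disk.DriveType.String())
+			}
+		}
+	}
+}
+```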
+
+### Topology
+
+> **NOTE**: Topology support is currently Linux-only. Windows support is
+> [planned](https://github.com/jaypipes/ghw/issues/166).
+
+Information about the host computer's architecture (NUMA vs. SMP), the host's
+node layout and processor caches can be retrieved from the `ghw.Topology()`
+function. This function returns a pointer to a `ghw.TopologyInfo` struct.
+
+The `ghw.TopologyInfo` struct contains two fields:
+
+* `ghw.TopologyInfo.Architecture` contains an enum with the value `ghw.NUMA` or
+ `ghw.SMP` depending on what the topology of the system is
+* `ghw.TopologyInfo.Nodes` is an array of pointers to `ghw.TopologyNode`
+ structs, one for each topology node (typically physical processor package)
+ found by the system
+
+Each `ghw.TopologyNode` struct contains the following fields:
+
+* `ghw.TopologyNode.ID` is the system's `uint32` identifier for the node
+* `ghw.TopologyNode.Cores` is an array of pointers to `ghw.ProcessorCore` structs that
+ are contained in this node
+* `ghw.TopologyNode.Caches` is an array of pointers to `ghw.MemoryCache` structs that
+ represent the low-level caches associated with processors and cores on the
+ system
+* `ghw.TopologyNode.Distance` is an array of distances between NUMA nodes as reported
+ by the system.
+* `ghw.TopologyNode.Memory` is a struct describing the memory attached to this node.
+ Please refer to the documentation of `ghw.MemoryArea`.
+
+See above in the [CPU](#cpu) section for information about the
+`ghw.ProcessorCore` struct and how to use and query it.
+
+Each `ghw.MemoryCache` struct contains the following fields:
+
+* `ghw.MemoryCache.Type` is an enum that contains one of `ghw.DATA`,
+ `ghw.INSTRUCTION` or `ghw.UNIFIED` depending on whether the cache stores CPU
+ instructions, program data, or both
+* `ghw.MemoryCache.Level` is a positive integer indicating how close the cache
+ is to the processor
+* `ghw.MemoryCache.SizeBytes` is an integer containing the number of bytes the
+ cache can contain
+* `ghw.MemoryCache.LogicalProcessors` is an array of integers representing the
+ logical processors that use the cache
+
+```go
+package main
+
+import (
+ "fmt"
+
+ "github.com/jaypipes/ghw"
+)
+
+func main() {
+ topology, err := ghw.Topology()
+ if err != nil {
+ fmt.Printf("Error getting topology info: %v", err)
+ }
+
+ fmt.Printf("%v\n", topology)
+
+ for _, node := range topology.Nodes {
+ fmt.Printf(" %v\n", node)
+ for _, cache := range node.Caches {
+ fmt.Printf(" %v\n", cache)
+ }
+ }
+}
+```
+
+Example output from my personal workstation:
+
+```
+topology SMP (1 nodes)
+ node #0 (6 cores)
+ L1i cache (32 KB) shared with logical processors: 3,9
+ L1i cache (32 KB) shared with logical processors: 2,8
+ L1i cache (32 KB) shared with logical processors: 11,5
+ L1i cache (32 KB) shared with logical processors: 10,4
+ L1i cache (32 KB) shared with logical processors: 0,6
+ L1i cache (32 KB) shared with logical processors: 1,7
+ L1d cache (32 KB) shared with logical processors: 11,5
+ L1d cache (32 KB) shared with logical processors: 10,4
+ L1d cache (32 KB) shared with logical processors: 3,9
+ L1d cache (32 KB) shared with logical processors: 1,7
+ L1d cache (32 KB) shared with logical processors: 0,6
+ L1d cache (32 KB) shared with logical processors: 2,8
+ L2 cache (256 KB) shared with logical processors: 2,8
+ L2 cache (256 KB) shared with logical processors: 3,9
+ L2 cache (256 KB) shared with logical processors: 0,6
+ L2 cache (256 KB) shared with logical processors: 10,4
+ L2 cache (256 KB) shared with logical processors: 1,7
+ L2 cache (256 KB) shared with logical processors: 11,5
+ L3 cache (12288 KB) shared with logical processors: 0,1,10,11,2,3,4,5,6,7,8,9
+```
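+
+Since each `ghw.TopologyNode.Memory` field follows the `ghw.MemoryArea` layout
+described in the [Memory](#memory) section, a short sketch can report the
+memory attached to each node (the `nil` check is a defensive assumption, in
+case no memory information could be determined for a node):
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/jaypipes/ghw"
+)
+
+func main() {
+	topology, err := ghw.Topology()
+	if err != nil {
+		fmt.Printf("Error getting topology info: %v", err)
+		return
+	}
+
+	for _, node := range topology.Nodes {
+		// Memory follows the ghw.MemoryArea layout documented above
+		if node.Memory == nil {
+			continue
+		}
+		fmt.Printf("node #%d: %d usable bytes of memory\n",
+			node.ID, node.Memory.TotalUsableBytes)
+	}
+}
+```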
+
+### Network
+
+Information about the host computer's networking hardware is returned from the
+`ghw.Network()` function. This function returns a pointer to a
+`ghw.NetworkInfo` struct.
+
+The `ghw.NetworkInfo` struct contains one field:
+
+* `ghw.NetworkInfo.NICs` is an array of pointers to `ghw.NIC` structs, one
+  for each network interface controller found for the system
+
+Each `ghw.NIC` struct contains the following fields:
+
+* `ghw.NIC.Name` is the system's identifier for the NIC
+* `ghw.NIC.MacAddress` is the MAC address for the NIC, if any
+* `ghw.NIC.IsVirtual` is a boolean indicating if the NIC is a virtualized
+ device
+* `ghw.NIC.Capabilities` is an array of pointers to `ghw.NICCapability` structs
+ that can describe the things the NIC supports. These capabilities match the
+ returned values from the `ethtool -k ` call on Linux
+* `ghw.NIC.PCIAddress` is the PCI device address of the device backing the NIC.
+  This is non-nil only if the backing device is indeed a PCI device; support for
+  more backing devices (e.g. USB) will be added in future versions.
+
+The `ghw.NICCapability` struct contains the following fields:
+
+* `ghw.NICCapability.Name` is the string name of the capability (e.g.
+ "tcp-segmentation-offload")
+* `ghw.NICCapability.IsEnabled` is a boolean indicating whether the capability
+ is currently enabled/active on the NIC
+* `ghw.NICCapability.CanEnable` is a boolean indicating whether the capability
+ may be enabled
+
+```go
+package main
+
+import (
+ "fmt"
+
+ "github.com/jaypipes/ghw"
+)
+
+func main() {
+ net, err := ghw.Network()
+ if err != nil {
+ fmt.Printf("Error getting network info: %v", err)
+ }
+
+ fmt.Printf("%v\n", net)
+
+ for _, nic := range net.NICs {
+ fmt.Printf(" %v\n", nic)
+
+ enabledCaps := make([]int, 0)
+ for x, cap := range nic.Capabilities {
+ if cap.IsEnabled {
+ enabledCaps = append(enabledCaps, x)
+ }
+ }
+ if len(enabledCaps) > 0 {
+ fmt.Printf(" enabled capabilities:\n")
+ for _, x := range enabledCaps {
+ fmt.Printf(" - %s\n", nic.Capabilities[x].Name)
+ }
+ }
+ }
+}
+```
+
+Example output from my personal laptop:
+
+```
+net (3 NICs)
+ docker0
+ enabled capabilities:
+ - tx-checksumming
+ - tx-checksum-ip-generic
+ - scatter-gather
+ - tx-scatter-gather
+ - tx-scatter-gather-fraglist
+ - tcp-segmentation-offload
+ - tx-tcp-segmentation
+ - tx-tcp-ecn-segmentation
+ - tx-tcp-mangleid-segmentation
+ - tx-tcp6-segmentation
+ - udp-fragmentation-offload
+ - generic-segmentation-offload
+ - generic-receive-offload
+ - tx-vlan-offload
+ - highdma
+ - tx-lockless
+ - netns-local
+ - tx-gso-robust
+ - tx-fcoe-segmentation
+ - tx-gre-segmentation
+ - tx-gre-csum-segmentation
+ - tx-ipxip4-segmentation
+ - tx-ipxip6-segmentation
+ - tx-udp_tnl-segmentation
+ - tx-udp_tnl-csum-segmentation
+ - tx-gso-partial
+ - tx-sctp-segmentation
+ - tx-esp-segmentation
+ - tx-vlan-stag-hw-insert
+ enp58s0f1
+ enabled capabilities:
+ - rx-checksumming
+ - generic-receive-offload
+ - rx-vlan-offload
+ - tx-vlan-offload
+ - highdma
+ wlp59s0
+ enabled capabilities:
+ - scatter-gather
+ - tx-scatter-gather
+ - generic-segmentation-offload
+ - generic-receive-offload
+ - highdma
+ - netns-local
+```
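+
+The `CanEnable` field makes it possible to check for a capability that is
+currently disabled but could be turned on; a short sketch, using
+"tcp-segmentation-offload" as an example capability name:
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/jaypipes/ghw"
+)
+
+func main() {
+	net, err := ghw.Network()
+	if err != nil {
+		fmt.Printf("Error getting network info: %v", err)
+		return
+	}
+
+	// report TCP segmentation offload status for each NIC
+	for _, nic := range net.NICs {
+		for _, cap := range nic.Capabilities {
+			if cap.Name == "tcp-segmentation-offload" {
+				fmt.Printf("%s: enabled=%v can-enable=%v\n",
+					nic.Name, cap.IsEnabled, cap.CanEnable)
+			}
+		}
+	}
+}
+```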
+
+### PCI
+
+`ghw` contains a PCI database inspection and querying facility that allows
+developers to not only gather information about devices on a local PCI bus but
+also query for information about hardware device classes, vendor and product
+information.
+
+**NOTE**: Parsing of the PCI-IDS file database is provided by the separate
+[github.com/jaypipes/pcidb library](http://github.com/jaypipes/pcidb). You can
+read that library's README for more information about the various structs that
+are exposed on the `ghw.PCIInfo` struct.
+
+The `ghw.PCI()` function returns a `ghw.PCIInfo` struct. The `ghw.PCIInfo`
+struct contains a number of fields that may be queried for PCI information:
+
+* `ghw.PCIInfo.Devices` is a slice of pointers to `ghw.PCIDevice` structs that
+ describe the PCI devices on the host system
+* `ghw.PCIInfo.Classes` is a map, keyed by the PCI class ID (a hex-encoded
+ string) of pointers to `pcidb.Class` structs, one for each class of PCI
+ device known to `ghw`
+ (**DEPRECATED**, will be removed in `ghw` `v1.0`. Use the
+ `github.com/jaypipes/pcidb` library for exploring PCI database information)
+* `ghw.PCIInfo.Vendors` is a map, keyed by the PCI vendor ID (a hex-encoded
+ string) of pointers to `pcidb.Vendor` structs, one for each PCI vendor
+ known to `ghw`
+ (**DEPRECATED**, will be removed in `ghw` `v1.0`. Use the
+ `github.com/jaypipes/pcidb` library for exploring PCI database information)
+* `ghw.PCIInfo.Products` is a map, keyed by the PCI product ID (a hex-encoded
+ string) of pointers to `pcidb.Product` structs, one for each PCI product
+ known to `ghw`
+ (**DEPRECATED**, will be removed in `ghw` `v1.0`. Use the
+ `github.com/jaypipes/pcidb` library for exploring PCI database information)
+
+**NOTE**: PCI products are often referred to by their "device ID". We use
+the term "product ID" in `ghw` because it more accurately reflects what the
+identifier is for: a specific product line produced by the vendor.
+
+The `ghw.PCIDevice` struct has the following fields:
+
+* `ghw.PCIDevice.Vendor` is a pointer to a `pcidb.Vendor` struct that
+ describes the device's primary vendor. This will always be non-nil.
+* `ghw.PCIDevice.Product` is a pointer to a `pcidb.Product` struct that
+ describes the device's primary product. This will always be non-nil.
+* `ghw.PCIDevice.Subsystem` is a pointer to a `pcidb.Product` struct that
+ describes the device's secondary/sub-product. This will always be non-nil.
+* `ghw.PCIDevice.Class` is a pointer to a `pcidb.Class` struct that
+ describes the device's class. This will always be non-nil.
+* `ghw.PCIDevice.Subclass` is a pointer to a `pcidb.Subclass` struct
+ that describes the device's subclass. This will always be non-nil.
+* `ghw.PCIDevice.ProgrammingInterface` is a pointer to a
+ `pcidb.ProgrammingInterface` struct that describes the device subclass'
+ programming interface. This will always be non-nil.
+* `ghw.PCIDevice.Driver` is a string representing the device driver the
+  system is using to handle this device. It can be the empty string if this
+  information is not available. A missing driver does not mean the device is
+  not functioning; it only means `ghw` was not able to retrieve this
+  information.
+
+The `ghw.PCIAddress` struct (an alias for the `Address` struct of the
+`github.com/jaypipes/ghw/pkg/pci/address` package) contains the PCI address
+fields. It has a `ghw.PCIAddress.String()` method that returns the canonical
+Domain:Bus:Device.Function ([D]BDF) representation of the address.
+
+The `ghw.PCIAddress` struct has the following fields:
+
+* `ghw.PCIAddress.Domain` is a string representing the PCI domain component of
+ the address.
+* `ghw.PCIAddress.Bus` is a string representing the PCI bus component of
+ the address.
+* `ghw.PCIAddress.Device` is a string representing the PCI device component of
+ the address.
+* `ghw.PCIAddress.Function` is a string representing the PCI function component of
+ the address.
+
+**NOTE**: Older versions (pre-`v0.9.0`) erroneously referred to the `Device`
+field as the `Slot` field. As noted by [@pearsonk](https://github.com/pearsonk)
+in [#220](https://github.com/jaypipes/ghw/issues/220), this was a misnomer.
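+
+Since these components are plain strings, the simplest way to obtain a
+populated struct is to parse a canonical [D]BDF string with
+`ghw.PCIAddressFromString` (exposed by the top-level package); a minimal
+sketch, assuming the function returns `nil` when the string cannot be parsed:
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/jaypipes/ghw"
+)
+
+func main() {
+	// parse a canonical Domain:Bus:Device.Function string into components
+	addr := ghw.PCIAddressFromString("0000:03:00.0")
+	if addr == nil {
+		fmt.Println("could not parse PCI address")
+		return
+	}
+	fmt.Printf("domain=%s bus=%s device=%s function=%s\n",
+		addr.Domain, addr.Bus, addr.Device, addr.Function)
+}
+```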
+
+#### Finding a PCI device by PCI address
+
+In addition to the above information, the `ghw.PCIInfo` struct has the
+following method:
+
+* `ghw.PCIInfo.GetDevice(address string)`
+
+The following code snippet shows how to iterate over the `ghw.PCIInfo.Devices`
+slice and output a simple list of PCI address and vendor/product information:
+
+```go
+package main
+
+import (
+ "fmt"
+
+ "github.com/jaypipes/ghw"
+)
+
+func main() {
+ pci, err := ghw.PCI()
+ if err != nil {
+ fmt.Printf("Error getting PCI info: %v", err)
+ }
+ fmt.Printf("host PCI devices:\n")
+ fmt.Println("====================================================")
+
+ for _, device := range pci.Devices {
+ vendor := device.Vendor
+ vendorName := vendor.Name
+ if len(vendor.Name) > 20 {
+ vendorName = string([]byte(vendorName)[0:17]) + "..."
+ }
+ product := device.Product
+ productName := product.Name
+ if len(product.Name) > 40 {
+ productName = string([]byte(productName)[0:37]) + "..."
+ }
+ fmt.Printf("%-12s\t%-20s\t%-40s\n", device.Address, vendorName, productName)
+ }
+}
+```
+
+On my local workstation, the output of the above looks like the following:
+
+```
+host PCI devices:
+====================================================
+0000:00:00.0 Intel Corporation 5520/5500/X58 I/O Hub to ESI Port
+0000:00:01.0 Intel Corporation 5520/5500/X58 I/O Hub PCI Express Roo...
+0000:00:02.0 Intel Corporation 5520/5500/X58 I/O Hub PCI Express Roo...
+0000:00:03.0 Intel Corporation 5520/5500/X58 I/O Hub PCI Express Roo...
+0000:00:07.0 Intel Corporation 5520/5500/X58 I/O Hub PCI Express Roo...
+0000:00:10.0 Intel Corporation 7500/5520/5500/X58 Physical and Link ...
+0000:00:10.1 Intel Corporation 7500/5520/5500/X58 Routing and Protoc...
+0000:00:14.0 Intel Corporation 7500/5520/5500/X58 I/O Hub System Man...
+0000:00:14.1 Intel Corporation 7500/5520/5500/X58 I/O Hub GPIO and S...
+0000:00:14.2 Intel Corporation 7500/5520/5500/X58 I/O Hub Control St...
+0000:00:14.3 Intel Corporation 7500/5520/5500/X58 I/O Hub Throttle R...
+0000:00:19.0 Intel Corporation 82567LF-2 Gigabit Network Connection
+0000:00:1a.0 Intel Corporation 82801JI (ICH10 Family) USB UHCI Contr...
+0000:00:1a.1 Intel Corporation 82801JI (ICH10 Family) USB UHCI Contr...
+0000:00:1a.2 Intel Corporation 82801JI (ICH10 Family) USB UHCI Contr...
+0000:00:1a.7 Intel Corporation 82801JI (ICH10 Family) USB2 EHCI Cont...
+0000:00:1b.0 Intel Corporation 82801JI (ICH10 Family) HD Audio Contr...
+0000:00:1c.0 Intel Corporation 82801JI (ICH10 Family) PCI Express Ro...
+0000:00:1c.1 Intel Corporation 82801JI (ICH10 Family) PCI Express Po...
+0000:00:1c.4 Intel Corporation 82801JI (ICH10 Family) PCI Express Ro...
+0000:00:1d.0 Intel Corporation 82801JI (ICH10 Family) USB UHCI Contr...
+0000:00:1d.1 Intel Corporation 82801JI (ICH10 Family) USB UHCI Contr...
+0000:00:1d.2 Intel Corporation 82801JI (ICH10 Family) USB UHCI Contr...
+0000:00:1d.7 Intel Corporation 82801JI (ICH10 Family) USB2 EHCI Cont...
+0000:00:1e.0 Intel Corporation 82801 PCI Bridge
+0000:00:1f.0 Intel Corporation 82801JIR (ICH10R) LPC Interface Contr...
+0000:00:1f.2 Intel Corporation 82801JI (ICH10 Family) SATA AHCI Cont...
+0000:00:1f.3 Intel Corporation 82801JI (ICH10 Family) SMBus Controller
+0000:01:00.0 NEC Corporation uPD720200 USB 3.0 Host Controller
+0000:02:00.0 Marvell Technolog... 88SE9123 PCIe SATA 6.0 Gb/s controller
+0000:02:00.1 Marvell Technolog... 88SE912x IDE Controller
+0000:03:00.0 NVIDIA Corporation GP107 [GeForce GTX 1050 Ti]
+0000:03:00.1 NVIDIA Corporation UNKNOWN
+0000:04:00.0 LSI Logic / Symbi... SAS2004 PCI-Express Fusion-MPT SAS-2 ...
+0000:06:00.0 Qualcomm Atheros AR5418 Wireless Network Adapter [AR50...
+0000:08:03.0 LSI Corporation FW322/323 [TrueFire] 1394a Controller
+0000:3f:00.0 Intel Corporation UNKNOWN
+0000:3f:00.1 Intel Corporation Xeon 5600 Series QuickPath Architectu...
+0000:3f:02.0 Intel Corporation Xeon 5600 Series QPI Link 0
+0000:3f:02.1 Intel Corporation Xeon 5600 Series QPI Physical 0
+0000:3f:02.2 Intel Corporation Xeon 5600 Series Mirror Port Link 0
+0000:3f:02.3 Intel Corporation Xeon 5600 Series Mirror Port Link 1
+0000:3f:03.0 Intel Corporation Xeon 5600 Series Integrated Memory Co...
+0000:3f:03.1 Intel Corporation Xeon 5600 Series Integrated Memory Co...
+0000:3f:03.4 Intel Corporation Xeon 5600 Series Integrated Memory Co...
+0000:3f:04.0 Intel Corporation Xeon 5600 Series Integrated Memory Co...
+0000:3f:04.1 Intel Corporation Xeon 5600 Series Integrated Memory Co...
+0000:3f:04.2 Intel Corporation Xeon 5600 Series Integrated Memory Co...
+0000:3f:04.3 Intel Corporation Xeon 5600 Series Integrated Memory Co...
+0000:3f:05.0 Intel Corporation Xeon 5600 Series Integrated Memory Co...
+0000:3f:05.1 Intel Corporation Xeon 5600 Series Integrated Memory Co...
+0000:3f:05.2 Intel Corporation Xeon 5600 Series Integrated Memory Co...
+0000:3f:05.3 Intel Corporation Xeon 5600 Series Integrated Memory Co...
+0000:3f:06.0 Intel Corporation Xeon 5600 Series Integrated Memory Co...
+0000:3f:06.1 Intel Corporation Xeon 5600 Series Integrated Memory Co...
+0000:3f:06.2 Intel Corporation Xeon 5600 Series Integrated Memory Co...
+0000:3f:06.3 Intel Corporation Xeon 5600 Series Integrated Memory Co...
+```
+
+The following code snippet shows how to call the `ghw.PCIInfo.GetDevice()`
+method and use its returned `ghw.PCIDevice` struct pointer:
+
+```go
+package main
+
+import (
+ "fmt"
+ "os"
+
+ "github.com/jaypipes/ghw"
+)
+
+func main() {
+ pci, err := ghw.PCI()
+ if err != nil {
+ fmt.Printf("Error getting PCI info: %v", err)
+ }
+
+ addr := "0000:00:00.0"
+ if len(os.Args) == 2 {
+ addr = os.Args[1]
+ }
+ fmt.Printf("PCI device information for %s\n", addr)
+ fmt.Println("====================================================")
+ deviceInfo := pci.GetDevice(addr)
+ if deviceInfo == nil {
+ fmt.Printf("could not retrieve PCI device information for %s\n", addr)
+ return
+ }
+
+ vendor := deviceInfo.Vendor
+ fmt.Printf("Vendor: %s [%s]\n", vendor.Name, vendor.ID)
+ product := deviceInfo.Product
+ fmt.Printf("Product: %s [%s]\n", product.Name, product.ID)
+ subsystem := deviceInfo.Subsystem
+ subvendor := pci.Vendors[subsystem.VendorID]
+ subvendorName := "UNKNOWN"
+ if subvendor != nil {
+ subvendorName = subvendor.Name
+ }
+ fmt.Printf("Subsystem: %s [%s] (Subvendor: %s)\n", subsystem.Name, subsystem.ID, subvendorName)
+ class := deviceInfo.Class
+ fmt.Printf("Class: %s [%s]\n", class.Name, class.ID)
+ subclass := deviceInfo.Subclass
+ fmt.Printf("Subclass: %s [%s]\n", subclass.Name, subclass.ID)
+ progIface := deviceInfo.ProgrammingInterface
+ fmt.Printf("Programming Interface: %s [%s]\n", progIface.Name, progIface.ID)
+}
+```
+
+Here's a sample output from my local workstation:
+
+```
+PCI device information for 0000:03:00.0
+====================================================
+Vendor: NVIDIA Corporation [10de]
+Product: GP107 [GeForce GTX 1050 Ti] [1c82]
+Subsystem: UNKNOWN [8613] (Subvendor: ASUSTeK Computer Inc.)
+Class: Display controller [03]
+Subclass: VGA compatible controller [00]
+Programming Interface: VGA controller [00]
+```
+
+### GPU
+
+Information about the host computer's graphics hardware is returned from the
+`ghw.GPU()` function. This function returns a pointer to a `ghw.GPUInfo`
+struct.
+
+The `ghw.GPUInfo` struct contains one field:
+
+* `ghw.GPUInfo.GraphicsCards` is an array of pointers to `ghw.GraphicsCard`
+  structs, one for each graphics card found for the system
+
+Each `ghw.GraphicsCard` struct contains the following fields:
+
+* `ghw.GraphicsCard.Index` is the system's numeric zero-based index for the
+ card on the bus
+* `ghw.GraphicsCard.Address` is the PCI address for the graphics card
+* `ghw.GraphicsCard.DeviceInfo` is a pointer to a `ghw.PCIDevice` struct
+ describing the graphics card. This may be `nil` if no PCI device information
+ could be determined for the card.
+* `ghw.GraphicsCard.Node` is a pointer to a `ghw.TopologyNode` struct that the
+ GPU/graphics card is affined to. On non-NUMA systems, this will always be
+ `nil`.
+
+```go
+package main
+
+import (
+ "fmt"
+
+ "github.com/jaypipes/ghw"
+)
+
+func main() {
+ gpu, err := ghw.GPU()
+ if err != nil {
+ fmt.Printf("Error getting GPU info: %v", err)
+ }
+
+ fmt.Printf("%v\n", gpu)
+
+ for _, card := range gpu.GraphicsCards {
+ fmt.Printf(" %v\n", card)
+ }
+}
+```
+
+Example output from my personal workstation:
+
+```
+gpu (1 graphics card)
+ card #0 @0000:03:00.0 -> class: 'Display controller' vendor: 'NVIDIA Corporation' product: 'GP107 [GeForce GTX 1050 Ti]'
+```
+
+**NOTE**: You can [read more](#pci) about the fields of the `ghw.PCIDevice`
+struct if you'd like to dig deeper into PCI subsystem and programming interface
+information
+
+**NOTE**: You can [read more](#topology) about the fields of the
+`ghw.TopologyNode` struct if you'd like to dig deeper into the NUMA/topology
+subsystem
+
+### Chassis
+
+The host's chassis information is accessible with the `ghw.Chassis()` function. This
+function returns a pointer to a `ghw.ChassisInfo` struct.
+
+The `ghw.ChassisInfo` struct contains multiple fields:
+
+* `ghw.ChassisInfo.AssetTag` is a string with the chassis asset tag
+* `ghw.ChassisInfo.SerialNumber` is a string with the chassis serial number
+* `ghw.ChassisInfo.Type` is a string with the chassis type *code*
+* `ghw.ChassisInfo.TypeDescription` is a string with a description of the chassis type
+* `ghw.ChassisInfo.Vendor` is a string with the chassis vendor
+* `ghw.ChassisInfo.Version` is a string with the chassis version
+
+**NOTE**: These fields are often missing for non-server hardware. Don't be
+surprised to see empty string or "None" values.
+
+```go
+package main
+
+import (
+ "fmt"
+
+ "github.com/jaypipes/ghw"
+)
+
+func main() {
+ chassis, err := ghw.Chassis()
+ if err != nil {
+ fmt.Printf("Error getting chassis info: %v", err)
+ }
+
+ fmt.Printf("%v\n", chassis)
+}
+```
+
+Example output from my personal workstation:
+
+```
+chassis type=Desktop vendor=System76 version=thelio-r1
+```
+
+**NOTE**: Some of the values, such as serial numbers, are shown as unknown because,
+by default, the Linux kernel disallows access to those fields when not running
+as root. They will be populated when running as root; otherwise you may see warnings
+like the following:
+
+```
+WARNING: Unable to read chassis_serial: open /sys/class/dmi/id/chassis_serial: permission denied
+```
+
+You can ignore them or use the [Disabling warning messages](#disabling-warning-messages)
+feature to quiet things down.
+
+### BIOS
+
+The host's basic input/output system (BIOS) information is accessible with the `ghw.BIOS()` function. This
+function returns a pointer to a `ghw.BIOSInfo` struct.
+
+The `ghw.BIOSInfo` struct contains multiple fields:
+
+* `ghw.BIOSInfo.Vendor` is a string with the BIOS vendor
+* `ghw.BIOSInfo.Version` is a string with the BIOS version
+* `ghw.BIOSInfo.Date` is a string with the date the BIOS was flashed/created
+
+```go
+package main
+
+import (
+ "fmt"
+
+ "github.com/jaypipes/ghw"
+)
+
+func main() {
+ bios, err := ghw.BIOS()
+ if err != nil {
+ fmt.Printf("Error getting BIOS info: %v", err)
+ }
+
+ fmt.Printf("%v\n", bios)
+}
+```
+
+Example output from my personal workstation:
+
+```
+bios vendor=System76 version=F2 Z5 date=11/14/2018
+```
+
+### Baseboard
+
+The host's baseboard information is accessible with the `ghw.Baseboard()` function. This
+function returns a pointer to a `ghw.BaseboardInfo` struct.
+
+The `ghw.BaseboardInfo` struct contains multiple fields:
+
+* `ghw.BaseboardInfo.AssetTag` is a string with the baseboard asset tag
+* `ghw.BaseboardInfo.SerialNumber` is a string with the baseboard serial number
+* `ghw.BaseboardInfo.Vendor` is a string with the baseboard vendor
+* `ghw.BaseboardInfo.Product` is a string with the baseboard name on Linux and
+ Product on Windows
+* `ghw.BaseboardInfo.Version` is a string with the baseboard version
+
+**NOTE**: These fields are often missing for non-server hardware. Don't be
+surprised to see empty string or "None" values.
+
+```go
+package main
+
+import (
+ "fmt"
+
+ "github.com/jaypipes/ghw"
+)
+
+func main() {
+ baseboard, err := ghw.Baseboard()
+ if err != nil {
+ fmt.Printf("Error getting baseboard info: %v", err)
+ }
+
+ fmt.Printf("%v\n", baseboard)
+}
+```
+
+Example output from my personal workstation:
+
+```
+baseboard vendor=System76 version=thelio-r1
+```
+
+**NOTE**: Some of the values, such as serial numbers, are shown as unknown because,
+by default, the Linux kernel disallows access to those fields when not running
+as root. They will be populated when running as root; otherwise you may see warnings
+like the following:
+
+```
+WARNING: Unable to read board_serial: open /sys/class/dmi/id/board_serial: permission denied
+```
+
+You can ignore them or use the [Disabling warning messages](#disabling-warning-messages)
+feature to quiet things down.
+
+### Product
+
+The host's product information is accessible with the `ghw.Product()` function. This
+function returns a pointer to a `ghw.ProductInfo` struct.
+
+The `ghw.ProductInfo` struct contains multiple fields:
+
+* `ghw.ProductInfo.Family` is a string describing the product family
+* `ghw.ProductInfo.Name` is a string with the product name
+* `ghw.ProductInfo.SerialNumber` is a string with the product serial number
+* `ghw.ProductInfo.UUID` is a string with the product UUID
+* `ghw.ProductInfo.SKU` is a string with the product stock keeping unit (SKU) identifier
+* `ghw.ProductInfo.Vendor` is a string with the product vendor
+* `ghw.ProductInfo.Version` is a string with the product version
+
+**NOTE**: These fields are often missing for non-server hardware. Don't be
+surprised to see empty string, "Default string" or "None" values.
+
+```go
+package main
+
+import (
+ "fmt"
+
+ "github.com/jaypipes/ghw"
+)
+
+func main() {
+ product, err := ghw.Product()
+ if err != nil {
+ fmt.Printf("Error getting product info: %v", err)
+ }
+
+ fmt.Printf("%v\n", product)
+}
+```
+
+Example output from my personal workstation:
+
+```
+product family=Default string name=Thelio vendor=System76 sku=Default string version=thelio-r1
+```
+
+**NOTE**: Some of the values, such as serial numbers, are shown as unknown because,
+by default, the Linux kernel disallows access to those fields when not running
+as root. They will be populated when running as root; otherwise you may see warnings
+like the following:
+
+```
+WARNING: Unable to read product_serial: open /sys/class/dmi/id/product_serial: permission denied
+```
+
+You can ignore them or use the [Disabling warning messages](#disabling-warning-messages)
+feature to quiet things down.
+
+## Serialization
+
+All of the `ghw` `XXXInfo` structs -- e.g. `ghw.CPUInfo` -- have two methods
+for producing a serialized JSON or YAML string representation of the contained
+information:
+
+* `JSONString()` returns a string containing the information serialized into
+ JSON. It accepts a single boolean parameter indicating whether to use
+ indentation when outputting the string
+* `YAMLString()` returns a string containing the information serialized into
+ YAML
+
+```go
+package main
+
+import (
+ "fmt"
+
+ "github.com/jaypipes/ghw"
+)
+
+func main() {
+ mem, err := ghw.Memory()
+ if err != nil {
+ fmt.Printf("Error getting memory info: %v", err)
+ }
+
+ fmt.Printf("%s", mem.YAMLString())
+}
+```
+
+The above example code prints the following on my local workstation:
+
+```
+memory:
+ supported_page_sizes:
+ - 1073741824
+ - 2097152
+ total_physical_bytes: 25263415296
+ total_usable_bytes: 25263415296
+```
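+
+The JSON variant works the same way; a short sketch using the indent
+parameter:
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/jaypipes/ghw"
+)
+
+func main() {
+	cpu, err := ghw.CPU()
+	if err != nil {
+		fmt.Printf("Error getting CPU info: %v", err)
+		return
+	}
+
+	// passing true requests indented (pretty-printed) JSON
+	fmt.Printf("%s", cpu.JSONString(true))
+}
+```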
+
+## Calling external programs
+
+By default, ghw may call external programs, for example `ethtool`, to learn about hardware capabilities.
+In some rare circumstances it may be useful to opt out of this behaviour and rely only on the data
+provided by pseudo-filesystems, like sysfs.
+The most common use case is consuming a snapshot from ghw. In that case the information
+provided by external tools will most likely be inconsistent with the data from the snapshot - they run on
+different hosts!
+To prevent ghw from calling external tools, set the environment variable `GHW_DISABLE_TOOLS` to any value,
+or, programmatically, use the `WithDisableTools` function.
+The default behaviour of ghw is to call external tools when available.
+
+**WARNING**:
+- on all platforms, disabling external tools makes ghw return less data.
+  Unless noted otherwise, there is _no fallback flow_ if external tools are disabled.
+- on darwin, disabling external tools disables block support entirely
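+
+Here is a minimal sketch of the programmatic opt-out, using the
+`WithDisableTools` option exposed by the top-level package:
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/jaypipes/ghw"
+)
+
+func main() {
+	// rely only on pseudo-filesystem data; do not shell out to
+	// external tools like ethtool
+	net, err := ghw.Network(ghw.WithDisableTools())
+	if err != nil {
+		fmt.Printf("Error getting network info: %v", err)
+		return
+	}
+	fmt.Printf("%v\n", net)
+}
+```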
+
+## Developers
+
+[Contributions](CONTRIBUTING.md) to `ghw` are welcomed! Fork the repo on GitHub
+and submit a pull request with your proposed changes. Or, feel free to log an
+issue for a feature request or bug report.
+
+### Running tests
+
+You can run unit tests easily using the `make test` command, like so:
+
+```
+[jaypipes@uberbox ghw]$ make test
+go test github.com/jaypipes/ghw github.com/jaypipes/ghw/cmd/ghwc
+ok github.com/jaypipes/ghw 0.084s
+? github.com/jaypipes/ghw/cmd/ghwc [no test files]
+```
diff --git a/vendor/github.com/jaypipes/ghw/SNAPSHOT.md b/vendor/github.com/jaypipes/ghw/SNAPSHOT.md
new file mode 100644
index 0000000000..696a3ea635
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/SNAPSHOT.md
@@ -0,0 +1,45 @@
+# ghw snapshots
+
+For ghw, snapshots are partial clones of the `/proc`, `/sys` (et al.) subtrees, copied from arbitrary
+machines, which ghw can consume later. They are "partial" because a snapshot doesn't need to contain a
+complete copy of the filesystem subtrees (that is doable but impractical). It only needs to contain
+the paths ghw cares about. The snapshot concept was introduced [to make ghw easier to test](https://github.com/jaypipes/ghw/issues/66).
+
+## Create and consume snapshot
+
+The recommended way to create snapshots for ghw is to use the `ghw-snapshot` tool.
+This tool is maintained by the ghw authors, and snapshots created with this tool are guaranteed to work.
+
+To consume the ghw snapshots, please check the `README.md` document.
+
+## Snapshot design and definitions
+
+The remainder of this document describes what a snapshot looks like and provides the rationale for all the major design decisions.
+Even though this document aims to provide all the necessary information to understand how ghw creates snapshots and what you should
+expect, we recommend also checking the [project issues](https://github.com/jaypipes/ghw/issues) and the `git` history to get the full picture.
+
+### Scope
+
+ghw supports snapshots only on Linux platforms. This restriction may be lifted in future releases.
+Snapshots must be consumable in the following supported ways:
+
+1. (way 1) from docker (or podman), mounting them as volumes. See `hack/run-against-snapshot.sh`
+2. (way 2) using the environment variables `GHW_SNAPSHOT_*`. See `README.md` for the full documentation.
+
+Other combinations are possible, but are unsupported and may stop working any time.
+You should depend only on the supported ways to consume snapshots.
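+
+For example, for (way 2), assuming `GHW_SNAPSHOT_PATH` is the variable pointing ghw at a snapshot archive, a consumer could run:
+
+```
+$ GHW_SNAPSHOT_PATH=/path/to/linux-amd64-d4771ed3300339bc75f856be09fc6430.tar.gz ghwc memory
+```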
+
+### Snapshot content constraints
+
+Stemming from the use cases, the snapshot content must have the following properties:
+
+0. (constraint 0) MUST contain the same information as a live system (obviously). Whatever you learn from a live system, you MUST be able to learn from a snapshot.
+1. (constraint 1) MUST NOT require any post-processing before it is consumable besides, obviously, unpacking the `.tar.gz` into the right directory - and pointing ghw to that directory.
+2. (constraint 2) MUST NOT require any special handling nor special code path in ghw. From ghw's perspective, running against a live system or against a snapshot should be completely transparent.
+3. (constraint 3) MUST contain only data - no executable code is allowed ever. This makes snapshots trivially safe to share and consume.
+4. (constraint 4) MUST NOT contain any personally-identifiable data. Data gathered into a snapshot is for testing and troubleshooting purposes and should be safe to send to troubleshooters to analyze.
+
+It must be noted that trivially cloning subtrees from `/proc` and `/sys` and creating a tarball out of them doesn't work,
+because both pseudo filesystems make use of symlinks, and [docker doesn't really play nice with symlinks](https://github.com/jaypipes/ghw/commit/f8ffd4d24e62eb9017511f072ccf51f13d4a3399).
+This conflicts with (way 1) above.
+
diff --git a/vendor/github.com/jaypipes/ghw/alias.go b/vendor/github.com/jaypipes/ghw/alias.go
new file mode 100644
index 0000000000..2e679a9678
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/alias.go
@@ -0,0 +1,152 @@
+//
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+//
+
+package ghw
+
+import (
+ "github.com/jaypipes/ghw/pkg/baseboard"
+ "github.com/jaypipes/ghw/pkg/bios"
+ "github.com/jaypipes/ghw/pkg/block"
+ "github.com/jaypipes/ghw/pkg/chassis"
+ "github.com/jaypipes/ghw/pkg/cpu"
+ "github.com/jaypipes/ghw/pkg/gpu"
+ "github.com/jaypipes/ghw/pkg/memory"
+ "github.com/jaypipes/ghw/pkg/net"
+ "github.com/jaypipes/ghw/pkg/option"
+ "github.com/jaypipes/ghw/pkg/pci"
+ pciaddress "github.com/jaypipes/ghw/pkg/pci/address"
+ "github.com/jaypipes/ghw/pkg/product"
+ "github.com/jaypipes/ghw/pkg/topology"
+)
+
+type WithOption = option.Option
+
+var (
+ WithChroot = option.WithChroot
+ WithSnapshot = option.WithSnapshot
+ WithAlerter = option.WithAlerter
+ WithNullAlerter = option.WithNullAlerter
+	// match the existing environment variable to minimize surprises
+ WithDisableWarnings = option.WithNullAlerter
+ WithDisableTools = option.WithDisableTools
+ WithPathOverrides = option.WithPathOverrides
+)
+
+type SnapshotOptions = option.SnapshotOptions
+
+type PathOverrides = option.PathOverrides
+
+type CPUInfo = cpu.Info
+
+var (
+ CPU = cpu.New
+)
+
+type MemoryArea = memory.Area
+type MemoryInfo = memory.Info
+type MemoryCacheType = memory.CacheType
+type MemoryModule = memory.Module
+
+const (
+ MEMORY_CACHE_TYPE_UNIFIED = memory.CACHE_TYPE_UNIFIED
+ MEMORY_CACHE_TYPE_INSTRUCTION = memory.CACHE_TYPE_INSTRUCTION
+ MEMORY_CACHE_TYPE_DATA = memory.CACHE_TYPE_DATA
+)
+
+var (
+ Memory = memory.New
+)
+
+type BlockInfo = block.Info
+type Disk = block.Disk
+type Partition = block.Partition
+
+var (
+ Block = block.New
+)
+
+type DriveType = block.DriveType
+
+const (
+ DRIVE_TYPE_UNKNOWN = block.DRIVE_TYPE_UNKNOWN
+ DRIVE_TYPE_HDD = block.DRIVE_TYPE_HDD
+ DRIVE_TYPE_FDD = block.DRIVE_TYPE_FDD
+ DRIVE_TYPE_ODD = block.DRIVE_TYPE_ODD
+ DRIVE_TYPE_SSD = block.DRIVE_TYPE_SSD
+)
+
+type StorageController = block.StorageController
+
+const (
+ STORAGE_CONTROLLER_UNKNOWN = block.STORAGE_CONTROLLER_UNKNOWN
+ STORAGE_CONTROLLER_IDE = block.STORAGE_CONTROLLER_IDE
+ STORAGE_CONTROLLER_SCSI = block.STORAGE_CONTROLLER_SCSI
+ STORAGE_CONTROLLER_NVME = block.STORAGE_CONTROLLER_NVME
+ STORAGE_CONTROLLER_VIRTIO = block.STORAGE_CONTROLLER_VIRTIO
+ STORAGE_CONTROLLER_MMC = block.STORAGE_CONTROLLER_MMC
+)
+
+type NetworkInfo = net.Info
+type NIC = net.NIC
+type NICCapability = net.NICCapability
+
+var (
+ Network = net.New
+)
+
+type BIOSInfo = bios.Info
+
+var (
+ BIOS = bios.New
+)
+
+type ChassisInfo = chassis.Info
+
+var (
+ Chassis = chassis.New
+)
+
+type BaseboardInfo = baseboard.Info
+
+var (
+ Baseboard = baseboard.New
+)
+
+type TopologyInfo = topology.Info
+type TopologyNode = topology.Node
+
+var (
+ Topology = topology.New
+)
+
+type Architecture = topology.Architecture
+
+const (
+ ARCHITECTURE_SMP = topology.ARCHITECTURE_SMP
+ ARCHITECTURE_NUMA = topology.ARCHITECTURE_NUMA
+)
+
+type PCIInfo = pci.Info
+type PCIAddress = pciaddress.Address
+type PCIDevice = pci.Device
+
+var (
+ PCI = pci.New
+ PCIAddressFromString = pciaddress.FromString
+)
+
+type ProductInfo = product.Info
+
+var (
+ Product = product.New
+)
+
+type GPUInfo = gpu.Info
+type GraphicsCard = gpu.GraphicsCard
+
+var (
+ GPU = gpu.New
+)
diff --git a/vendor/github.com/jaypipes/ghw/doc.go b/vendor/github.com/jaypipes/ghw/doc.go
new file mode 100644
index 0000000000..9ae0c30ae0
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/doc.go
@@ -0,0 +1,314 @@
+//
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+//
+
+/*
+	Package ghw can determine various hardware-related
+ information about the host computer:
+
+ * Memory
+ * CPU
+ * Block storage
+ * Topology
+ * Network
+ * PCI
+ * GPU
+
+ Memory
+
+ Information about the host computer's memory can be retrieved using the
+ Memory function which returns a pointer to a MemoryInfo struct.
+
+ package main
+
+ import (
+ "fmt"
+
+ "github.com/jaypipes/ghw"
+ )
+
+ func main() {
+ memory, err := ghw.Memory()
+ if err != nil {
+ fmt.Printf("Error getting memory info: %v", err)
+ }
+
+ fmt.Println(memory.String())
+ }
+
+ CPU
+
+ The CPU function returns a CPUInfo struct that contains information about
+ the CPUs on the host system.
+
+ package main
+
+ import (
+ "fmt"
+ "math"
+ "strings"
+
+ "github.com/jaypipes/ghw"
+ )
+
+	func main() {
+		cpu, err := ghw.CPU()
+		if err != nil {
+			fmt.Printf("Error getting CPU info: %v", err)
+		}
+
+		fmt.Printf("%v\n", cpu)
+
+		for _, proc := range cpu.Processors {
+			fmt.Printf(" %v\n", proc)
+			for _, core := range proc.Cores {
+				fmt.Printf("  %v\n", core)
+			}
+			if len(proc.Capabilities) > 0 {
+				// pretty-print the (large) block of capability strings into rows
+				// of 6 capability strings
+				rows := int(math.Ceil(float64(len(proc.Capabilities)) / float64(6)))
+				for row := 0; row < rows; row++ {
+					rowStart := row * 6
+					rowEnd := int(math.Min(float64(rowStart+6), float64(len(proc.Capabilities))))
+					rowElems := proc.Capabilities[rowStart:rowEnd]
+					capStr := strings.Join(rowElems, " ")
+					if row == 0 && rows == 1 {
+						fmt.Printf("  capabilities: [%s]\n", capStr)
+					} else if row == 0 {
+						fmt.Printf("  capabilities: [%s\n", capStr)
+					} else if rowEnd < len(proc.Capabilities) {
+						fmt.Printf("                 %s\n", capStr)
+					} else {
+						fmt.Printf("                 %s]\n", capStr)
+					}
+				}
+			}
+		}
+	}
+
+ Block storage
+
+ Information about the host computer's local block storage is returned from
+ the Block function. This function returns a pointer to a BlockInfo struct.
+
+ package main
+
+ import (
+ "fmt"
+
+ "github.com/jaypipes/ghw"
+ )
+
+ func main() {
+ block, err := ghw.Block()
+ if err != nil {
+ fmt.Printf("Error getting block storage info: %v", err)
+ }
+
+ fmt.Printf("%v\n", block)
+
+ for _, disk := range block.Disks {
+ fmt.Printf(" %v\n", disk)
+ for _, part := range disk.Partitions {
+ fmt.Printf(" %v\n", part)
+ }
+ }
+ }
+
+ Topology
+
+ Information about the host computer's architecture (NUMA vs. SMP), the
+ host's node layout and processor caches can be retrieved from the Topology
+ function. This function returns a pointer to a TopologyInfo struct.
+
+ package main
+
+ import (
+ "fmt"
+
+ "github.com/jaypipes/ghw"
+ )
+
+ func main() {
+ topology, err := ghw.Topology()
+ if err != nil {
+ fmt.Printf("Error getting topology info: %v", err)
+ }
+
+ fmt.Printf("%v\n", topology)
+
+ for _, node := range topology.Nodes {
+ fmt.Printf(" %v\n", node)
+ for _, cache := range node.Caches {
+ fmt.Printf(" %v\n", cache)
+ }
+ }
+ }
+
+ Network
+
+ Information about the host computer's networking hardware is returned from
+ the Network function. This function returns a pointer to a NetworkInfo
+ struct.
+
+ package main
+
+ import (
+ "fmt"
+
+ "github.com/jaypipes/ghw"
+ )
+
+ func main() {
+ net, err := ghw.Network()
+ if err != nil {
+ fmt.Printf("Error getting network info: %v", err)
+ }
+
+ fmt.Printf("%v\n", net)
+
+ for _, nic := range net.NICs {
+ fmt.Printf(" %v\n", nic)
+
+ enabledCaps := make([]int, 0)
+ for x, cap := range nic.Capabilities {
+ if cap.IsEnabled {
+ enabledCaps = append(enabledCaps, x)
+ }
+ }
+ if len(enabledCaps) > 0 {
+ fmt.Printf(" enabled capabilities:\n")
+ for _, x := range enabledCaps {
+ fmt.Printf(" - %s\n", nic.Capabilities[x].Name)
+ }
+ }
+ }
+ }
+
+ PCI
+
+ ghw contains a PCI database inspection and querying facility that allows
+ developers to not only gather information about devices on a local PCI bus
+ but also query for information about hardware device classes, vendor and
+ product information.
+
+ **NOTE**: Parsing of the PCI-IDS file database is provided by the separate
+ http://github.com/jaypipes/pcidb library. You can read that library's
+ README for more information about the various structs that are exposed on
+ the PCIInfo struct.
+
+ PCIInfo.ListDevices is used to iterate over a host's PCI devices:
+
+ package main
+
+ import (
+ "fmt"
+
+ "github.com/jaypipes/ghw"
+ )
+
+ func main() {
+ pci, err := ghw.PCI()
+ if err != nil {
+ fmt.Printf("Error getting PCI info: %v", err)
+ }
+ fmt.Printf("host PCI devices:\n")
+ fmt.Println("====================================================")
+ devices := pci.ListDevices()
+ if len(devices) == 0 {
+ fmt.Printf("error: could not retrieve PCI devices\n")
+ return
+ }
+
+ for _, device := range devices {
+ vendor := device.Vendor
+ vendorName := vendor.Name
+ if len(vendor.Name) > 20 {
+ vendorName = string([]byte(vendorName)[0:17]) + "..."
+ }
+ product := device.Product
+ productName := product.Name
+ if len(product.Name) > 40 {
+ productName = string([]byte(productName)[0:37]) + "..."
+ }
+ fmt.Printf("%-12s\t%-20s\t%-40s\n", device.Address, vendorName, productName)
+ }
+ }
+
+ The following code snippet shows how to call the PCIInfo.GetDevice method
+ and use its returned PCIDevice struct pointer:
+
+ package main
+
+ import (
+ "fmt"
+ "os"
+
+ "github.com/jaypipes/ghw"
+ )
+
+ func main() {
+ pci, err := ghw.PCI()
+ if err != nil {
+ fmt.Printf("Error getting PCI info: %v", err)
+ }
+
+ addr := "0000:00:00.0"
+ if len(os.Args) == 2 {
+ addr = os.Args[1]
+ }
+ fmt.Printf("PCI device information for %s\n", addr)
+ fmt.Println("====================================================")
+ deviceInfo := pci.GetDevice(addr)
+ if deviceInfo == nil {
+ fmt.Printf("could not retrieve PCI device information for %s\n", addr)
+ return
+ }
+
+ vendor := deviceInfo.Vendor
+ fmt.Printf("Vendor: %s [%s]\n", vendor.Name, vendor.ID)
+ product := deviceInfo.Product
+ fmt.Printf("Product: %s [%s]\n", product.Name, product.ID)
+ subsystem := deviceInfo.Subsystem
+ subvendor := pci.Vendors[subsystem.VendorID]
+ subvendorName := "UNKNOWN"
+ if subvendor != nil {
+ subvendorName = subvendor.Name
+ }
+ fmt.Printf("Subsystem: %s [%s] (Subvendor: %s)\n", subsystem.Name, subsystem.ID, subvendorName)
+ class := deviceInfo.Class
+ fmt.Printf("Class: %s [%s]\n", class.Name, class.ID)
+ subclass := deviceInfo.Subclass
+ fmt.Printf("Subclass: %s [%s]\n", subclass.Name, subclass.ID)
+ progIface := deviceInfo.ProgrammingInterface
+ fmt.Printf("Programming Interface: %s [%s]\n", progIface.Name, progIface.ID)
+ }
+
+ GPU
+
+ Information about the host computer's graphics hardware is returned from
+ the GPU function. This function returns a pointer to a GPUInfo struct.
+
+ package main
+
+ import (
+ "fmt"
+
+ "github.com/jaypipes/ghw"
+ )
+
+ func main() {
+ gpu, err := ghw.GPU()
+ if err != nil {
+ fmt.Printf("Error getting GPU info: %v", err)
+ }
+
+ fmt.Printf("%v\n", gpu)
+
+ for _, card := range gpu.GraphicsCards {
+ fmt.Printf(" %v\n", card)
+ }
+ }
+*/
+package ghw
diff --git a/vendor/github.com/jaypipes/ghw/host.go b/vendor/github.com/jaypipes/ghw/host.go
new file mode 100644
index 0000000000..5d82a53a14
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/host.go
@@ -0,0 +1,139 @@
+//
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+//
+
+package ghw
+
+import (
+ "fmt"
+
+ "github.com/jaypipes/ghw/pkg/context"
+
+ "github.com/jaypipes/ghw/pkg/baseboard"
+ "github.com/jaypipes/ghw/pkg/bios"
+ "github.com/jaypipes/ghw/pkg/block"
+ "github.com/jaypipes/ghw/pkg/chassis"
+ "github.com/jaypipes/ghw/pkg/cpu"
+ "github.com/jaypipes/ghw/pkg/gpu"
+ "github.com/jaypipes/ghw/pkg/marshal"
+ "github.com/jaypipes/ghw/pkg/memory"
+ "github.com/jaypipes/ghw/pkg/net"
+ "github.com/jaypipes/ghw/pkg/pci"
+ "github.com/jaypipes/ghw/pkg/product"
+ "github.com/jaypipes/ghw/pkg/topology"
+)
+
+// HostInfo is a wrapper struct containing information about the host system's
+// memory, block storage, CPU, etc
+type HostInfo struct {
+ ctx *context.Context
+ Memory *memory.Info `json:"memory"`
+ Block *block.Info `json:"block"`
+ CPU *cpu.Info `json:"cpu"`
+ Topology *topology.Info `json:"topology"`
+ Network *net.Info `json:"network"`
+ GPU *gpu.Info `json:"gpu"`
+ Chassis *chassis.Info `json:"chassis"`
+ BIOS *bios.Info `json:"bios"`
+ Baseboard *baseboard.Info `json:"baseboard"`
+ Product *product.Info `json:"product"`
+ PCI *pci.Info `json:"pci"`
+}
+
+// Host returns a pointer to a HostInfo struct that contains fields with
+// information about the host system's CPU, memory, network devices, etc
+func Host(opts ...*WithOption) (*HostInfo, error) {
+ ctx := context.New(opts...)
+
+ memInfo, err := memory.New(opts...)
+ if err != nil {
+ return nil, err
+ }
+ blockInfo, err := block.New(opts...)
+ if err != nil {
+ return nil, err
+ }
+ cpuInfo, err := cpu.New(opts...)
+ if err != nil {
+ return nil, err
+ }
+ topologyInfo, err := topology.New(opts...)
+ if err != nil {
+ return nil, err
+ }
+ netInfo, err := net.New(opts...)
+ if err != nil {
+ return nil, err
+ }
+ gpuInfo, err := gpu.New(opts...)
+ if err != nil {
+ return nil, err
+ }
+ chassisInfo, err := chassis.New(opts...)
+ if err != nil {
+ return nil, err
+ }
+ biosInfo, err := bios.New(opts...)
+ if err != nil {
+ return nil, err
+ }
+ baseboardInfo, err := baseboard.New(opts...)
+ if err != nil {
+ return nil, err
+ }
+ productInfo, err := product.New(opts...)
+ if err != nil {
+ return nil, err
+ }
+ pciInfo, err := pci.New(opts...)
+ if err != nil {
+ return nil, err
+ }
+ return &HostInfo{
+ ctx: ctx,
+ CPU: cpuInfo,
+ Memory: memInfo,
+ Block: blockInfo,
+ Topology: topologyInfo,
+ Network: netInfo,
+ GPU: gpuInfo,
+ Chassis: chassisInfo,
+ BIOS: biosInfo,
+ Baseboard: baseboardInfo,
+ Product: productInfo,
+ PCI: pciInfo,
+ }, nil
+}
+
+// String returns a newline-separated output of the HostInfo's component
+// structs' String-ified output
+func (info *HostInfo) String() string {
+ return fmt.Sprintf(
+ "%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n",
+ info.Block.String(),
+ info.CPU.String(),
+ info.GPU.String(),
+ info.Memory.String(),
+ info.Network.String(),
+ info.Topology.String(),
+ info.Chassis.String(),
+ info.BIOS.String(),
+ info.Baseboard.String(),
+ info.Product.String(),
+ info.PCI.String(),
+ )
+}
+
+// YAMLString returns a string with the host information formatted as YAML
+// under a top-level "host:" key
+func (i *HostInfo) YAMLString() string {
+ return marshal.SafeYAML(i.ctx, i)
+}
+
+// JSONString returns a string with the host information formatted as JSON
+// under a top-level "host:" key
+func (i *HostInfo) JSONString(indent bool) string {
+ return marshal.SafeJSON(i.ctx, i, indent)
+}
diff --git a/vendor/github.com/jaypipes/ghw/pkg/baseboard/baseboard.go b/vendor/github.com/jaypipes/ghw/pkg/baseboard/baseboard.go
new file mode 100644
index 0000000000..ac4bf41a9b
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/pkg/baseboard/baseboard.go
@@ -0,0 +1,80 @@
+//
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+//
+
+package baseboard
+
+import (
+ "github.com/jaypipes/ghw/pkg/context"
+ "github.com/jaypipes/ghw/pkg/marshal"
+ "github.com/jaypipes/ghw/pkg/option"
+ "github.com/jaypipes/ghw/pkg/util"
+)
+
+// Info defines baseboard release information
+type Info struct {
+ ctx *context.Context
+ AssetTag string `json:"asset_tag"`
+ SerialNumber string `json:"serial_number"`
+ Vendor string `json:"vendor"`
+ Version string `json:"version"`
+ Product string `json:"product"`
+}
+
+func (i *Info) String() string {
+ vendorStr := ""
+ if i.Vendor != "" {
+ vendorStr = " vendor=" + i.Vendor
+ }
+ serialStr := ""
+ if i.SerialNumber != "" && i.SerialNumber != util.UNKNOWN {
+ serialStr = " serial=" + i.SerialNumber
+ }
+ versionStr := ""
+ if i.Version != "" {
+ versionStr = " version=" + i.Version
+ }
+
+ productStr := ""
+ if i.Product != "" {
+ productStr = " product=" + i.Product
+ }
+
+ return "baseboard" + util.ConcatStrings(
+ vendorStr,
+ serialStr,
+ versionStr,
+ productStr,
+ )
+}
+
+// New returns a pointer to an Info struct containing information about the
+// host's baseboard
+func New(opts ...*option.Option) (*Info, error) {
+ ctx := context.New(opts...)
+ info := &Info{ctx: ctx}
+ if err := ctx.Do(info.load); err != nil {
+ return nil, err
+ }
+ return info, nil
+}
+
+// simple private struct used to encapsulate baseboard information in a top-level
+// "baseboard" YAML/JSON map/object key
+type baseboardPrinter struct {
+ Info *Info `json:"baseboard"`
+}
+
+// YAMLString returns a string with the baseboard information formatted as YAML
+// under a top-level "baseboard:" key
+func (info *Info) YAMLString() string {
+ return marshal.SafeYAML(info.ctx, baseboardPrinter{info})
+}
+
+// JSONString returns a string with the baseboard information formatted as JSON
+// under a top-level "baseboard:" key
+func (info *Info) JSONString(indent bool) string {
+ return marshal.SafeJSON(info.ctx, baseboardPrinter{info}, indent)
+}
diff --git a/vendor/github.com/jaypipes/ghw/pkg/baseboard/baseboard_linux.go b/vendor/github.com/jaypipes/ghw/pkg/baseboard/baseboard_linux.go
new file mode 100644
index 0000000000..c8c598d421
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/pkg/baseboard/baseboard_linux.go
@@ -0,0 +1,20 @@
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+//
+
+package baseboard
+
+import (
+ "github.com/jaypipes/ghw/pkg/linuxdmi"
+)
+
+func (i *Info) load() error {
+ i.AssetTag = linuxdmi.Item(i.ctx, "board_asset_tag")
+ i.SerialNumber = linuxdmi.Item(i.ctx, "board_serial")
+ i.Vendor = linuxdmi.Item(i.ctx, "board_vendor")
+ i.Version = linuxdmi.Item(i.ctx, "board_version")
+ i.Product = linuxdmi.Item(i.ctx, "board_name")
+
+ return nil
+}
diff --git a/vendor/github.com/jaypipes/ghw/pkg/baseboard/baseboard_stub.go b/vendor/github.com/jaypipes/ghw/pkg/baseboard/baseboard_stub.go
new file mode 100644
index 0000000000..f5b146919d
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/pkg/baseboard/baseboard_stub.go
@@ -0,0 +1,19 @@
+//go:build !linux && !windows
+// +build !linux,!windows
+
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+//
+
+package baseboard
+
+import (
+ "runtime"
+
+ "github.com/pkg/errors"
+)
+
+func (i *Info) load() error {
+ return errors.New("baseboardFillInfo not implemented on " + runtime.GOOS)
+}
diff --git a/vendor/github.com/jaypipes/ghw/pkg/baseboard/baseboard_windows.go b/vendor/github.com/jaypipes/ghw/pkg/baseboard/baseboard_windows.go
new file mode 100644
index 0000000000..0fb14fbffe
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/pkg/baseboard/baseboard_windows.go
@@ -0,0 +1,37 @@
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+//
+
+package baseboard
+
+import (
+ "github.com/StackExchange/wmi"
+)
+
+const wqlBaseboard = "SELECT Manufacturer, SerialNumber, Tag, Version, Product FROM Win32_BaseBoard"
+
+type win32Baseboard struct {
+ Manufacturer *string
+ SerialNumber *string
+ Tag *string
+ Version *string
+ Product *string
+}
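+
+// NOTE: WMI may return NULL for any of the selected columns, leaving the
+// corresponding pointer fields above nil; load() below assumes every field
+// is populated whenever a row is returned.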
+
+func (i *Info) load() error {
+ // Getting data from WMI
+ var win32BaseboardDescriptions []win32Baseboard
+ if err := wmi.Query(wqlBaseboard, &win32BaseboardDescriptions); err != nil {
+ return err
+ }
+ if len(win32BaseboardDescriptions) > 0 {
+ i.AssetTag = *win32BaseboardDescriptions[0].Tag
+ i.SerialNumber = *win32BaseboardDescriptions[0].SerialNumber
+ i.Vendor = *win32BaseboardDescriptions[0].Manufacturer
+ i.Version = *win32BaseboardDescriptions[0].Version
+ i.Product = *win32BaseboardDescriptions[0].Product
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/jaypipes/ghw/pkg/bios/bios.go b/vendor/github.com/jaypipes/ghw/pkg/bios/bios.go
new file mode 100644
index 0000000000..85a7c64b16
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/pkg/bios/bios.go
@@ -0,0 +1,77 @@
+//
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+//
+
+package bios
+
+import (
+ "fmt"
+
+ "github.com/jaypipes/ghw/pkg/context"
+ "github.com/jaypipes/ghw/pkg/marshal"
+ "github.com/jaypipes/ghw/pkg/option"
+ "github.com/jaypipes/ghw/pkg/util"
+)
+
+// Info defines BIOS release information
+type Info struct {
+ ctx *context.Context
+ Vendor string `json:"vendor"`
+ Version string `json:"version"`
+ Date string `json:"date"`
+}
+
+func (i *Info) String() string {
+
+ vendorStr := ""
+ if i.Vendor != "" {
+ vendorStr = " vendor=" + i.Vendor
+ }
+ versionStr := ""
+ if i.Version != "" {
+ versionStr = " version=" + i.Version
+ }
+ dateStr := ""
+ if i.Date != "" && i.Date != util.UNKNOWN {
+ dateStr = " date=" + i.Date
+ }
+
+ res := fmt.Sprintf(
+ "bios%s%s%s",
+ vendorStr,
+ versionStr,
+ dateStr,
+ )
+ return res
+}
+
+// New returns a pointer to an Info struct containing information
+// about the host's BIOS
+func New(opts ...*option.Option) (*Info, error) {
+ ctx := context.New(opts...)
+ info := &Info{ctx: ctx}
+ if err := ctx.Do(info.load); err != nil {
+ return nil, err
+ }
+ return info, nil
+}
+
+// simple private struct used to encapsulate BIOS information in a top-level
+// "bios" YAML/JSON map/object key
+type biosPrinter struct {
+ Info *Info `json:"bios"`
+}
+
+// YAMLString returns a string with the BIOS information formatted as YAML
+// under a top-level "bios:" key
+func (info *Info) YAMLString() string {
+ return marshal.SafeYAML(info.ctx, biosPrinter{info})
+}
+
+// JSONString returns a string with the BIOS information formatted as JSON
+// under a top-level "bios:" key
+func (info *Info) JSONString(indent bool) string {
+ return marshal.SafeJSON(info.ctx, biosPrinter{info}, indent)
+}
diff --git a/vendor/github.com/jaypipes/ghw/pkg/bios/bios_linux.go b/vendor/github.com/jaypipes/ghw/pkg/bios/bios_linux.go
new file mode 100644
index 0000000000..9788f4f7a1
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/pkg/bios/bios_linux.go
@@ -0,0 +1,16 @@
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+//
+
+package bios
+
+import "github.com/jaypipes/ghw/pkg/linuxdmi"
+
+func (i *Info) load() error {
+ i.Vendor = linuxdmi.Item(i.ctx, "bios_vendor")
+ i.Version = linuxdmi.Item(i.ctx, "bios_version")
+ i.Date = linuxdmi.Item(i.ctx, "bios_date")
+
+ return nil
+}
diff --git a/vendor/github.com/jaypipes/ghw/pkg/bios/bios_stub.go b/vendor/github.com/jaypipes/ghw/pkg/bios/bios_stub.go
new file mode 100644
index 0000000000..5307b4a0a9
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/pkg/bios/bios_stub.go
@@ -0,0 +1,19 @@
+//go:build !linux && !windows
+// +build !linux,!windows
+
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+//
+
+package bios
+
+import (
+ "runtime"
+
+ "github.com/pkg/errors"
+)
+
+func (i *Info) load() error {
+ return errors.New("biosFillInfo not implemented on " + runtime.GOOS)
+}
diff --git a/vendor/github.com/jaypipes/ghw/pkg/bios/bios_windows.go b/vendor/github.com/jaypipes/ghw/pkg/bios/bios_windows.go
new file mode 100644
index 0000000000..778628e9a8
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/pkg/bios/bios_windows.go
@@ -0,0 +1,32 @@
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+//
+
+package bios
+
+import (
+ "github.com/StackExchange/wmi"
+)
+
+const wqlBIOS = "SELECT InstallDate, Manufacturer, Version FROM CIM_BIOSElement"
+
+type win32BIOS struct {
+ InstallDate *string
+ Manufacturer *string
+ Version *string
+}
+
+func (i *Info) load() error {
+ // Getting data from WMI
+ var win32BIOSDescriptions []win32BIOS
+ if err := wmi.Query(wqlBIOS, &win32BIOSDescriptions); err != nil {
+ return err
+ }
+ if len(win32BIOSDescriptions) > 0 {
+ i.Vendor = *win32BIOSDescriptions[0].Manufacturer
+ i.Version = *win32BIOSDescriptions[0].Version
+ i.Date = *win32BIOSDescriptions[0].InstallDate
+ }
+ return nil
+}
diff --git a/vendor/github.com/jaypipes/ghw/pkg/block/block.go b/vendor/github.com/jaypipes/ghw/pkg/block/block.go
new file mode 100644
index 0000000000..38830ccf79
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/pkg/block/block.go
@@ -0,0 +1,309 @@
+//
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+//
+
+package block
+
+import (
+ "encoding/json"
+ "fmt"
+ "math"
+ "strconv"
+ "strings"
+
+ "github.com/jaypipes/ghw/pkg/context"
+ "github.com/jaypipes/ghw/pkg/marshal"
+ "github.com/jaypipes/ghw/pkg/option"
+ "github.com/jaypipes/ghw/pkg/unitutil"
+ "github.com/jaypipes/ghw/pkg/util"
+)
+
+// DriveType describes the general category of drive device
+type DriveType int
+
+const (
+ DRIVE_TYPE_UNKNOWN DriveType = iota
+ DRIVE_TYPE_HDD // Hard disk drive
+ DRIVE_TYPE_FDD // Floppy disk drive
+ DRIVE_TYPE_ODD // Optical disk drive
+ DRIVE_TYPE_SSD // Solid-state drive
+)
+
+var (
+ driveTypeString = map[DriveType]string{
+ DRIVE_TYPE_UNKNOWN: "Unknown",
+ DRIVE_TYPE_HDD: "HDD",
+ DRIVE_TYPE_FDD: "FDD",
+ DRIVE_TYPE_ODD: "ODD",
+ DRIVE_TYPE_SSD: "SSD",
+ }
+
+ // NOTE(fromani): the keys are all lowercase and do not match
+ // the keys in the opposite table `driveTypeString`.
+ // This is done because of the choice we made in
+ // DriveType::MarshalJSON.
+ // We use this table only in UnmarshalJSON, so it should be OK.
+ stringDriveType = map[string]DriveType{
+ "unknown": DRIVE_TYPE_UNKNOWN,
+ "hdd": DRIVE_TYPE_HDD,
+ "fdd": DRIVE_TYPE_FDD,
+ "odd": DRIVE_TYPE_ODD,
+ "ssd": DRIVE_TYPE_SSD,
+ }
+)
+
+func (dt DriveType) String() string {
+ return driveTypeString[dt]
+}
+
+// NOTE(jaypipes): since serialized output is as "official" as we're going to
+// get, let's lowercase the string output when serializing, in order to
+// "normalize" the expected serialized output
+func (dt DriveType) MarshalJSON() ([]byte, error) {
+ return []byte(strconv.Quote(strings.ToLower(dt.String()))), nil
+}
+
+func (dt *DriveType) UnmarshalJSON(b []byte) error {
+ var s string
+ if err := json.Unmarshal(b, &s); err != nil {
+ return err
+ }
+ key := strings.ToLower(s)
+ val, ok := stringDriveType[key]
+ if !ok {
+ return fmt.Errorf("unknown drive type: %q", key)
+ }
+ *dt = val
+ return nil
+}
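+
+// For example, DRIVE_TYPE_SSD marshals to the JSON string "ssd", and both
+// "ssd" and "SSD" unmarshal back to DRIVE_TYPE_SSD via the case-insensitive
+// lookup above.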
+
+// StorageController is a category of block storage controller/driver. It
+// represents more of the physical hardware interface than the storage
+// protocol, which represents more of the software interface.
+//
+// See discussion on https://github.com/jaypipes/ghw/issues/117
+type StorageController int
+
+const (
+ STORAGE_CONTROLLER_UNKNOWN StorageController = iota
+ STORAGE_CONTROLLER_IDE // Integrated Drive Electronics
+ STORAGE_CONTROLLER_SCSI // Small computer system interface
+ STORAGE_CONTROLLER_NVME // Non-volatile Memory Express
+ STORAGE_CONTROLLER_VIRTIO // Virtualized storage controller/driver
+ STORAGE_CONTROLLER_MMC // Multi-media controller (used for mobile phone storage devices)
+)
+
+var (
+ storageControllerString = map[StorageController]string{
+ STORAGE_CONTROLLER_UNKNOWN: "Unknown",
+ STORAGE_CONTROLLER_IDE: "IDE",
+ STORAGE_CONTROLLER_SCSI: "SCSI",
+ STORAGE_CONTROLLER_NVME: "NVMe",
+ STORAGE_CONTROLLER_VIRTIO: "virtio",
+ STORAGE_CONTROLLER_MMC: "MMC",
+ }
+
+ // NOTE(fromani): the keys are all lowercase and do not match
+ // the keys in the opposite table `storageControllerString`.
+ // This is done because of the choice we made in
+ // StorageController::MarshalJSON.
+ // We use this table only in UnmarshalJSON, so it should be OK.
+ stringStorageController = map[string]StorageController{
+ "unknown": STORAGE_CONTROLLER_UNKNOWN,
+ "ide": STORAGE_CONTROLLER_IDE,
+ "scsi": STORAGE_CONTROLLER_SCSI,
+ "nvme": STORAGE_CONTROLLER_NVME,
+ "virtio": STORAGE_CONTROLLER_VIRTIO,
+ "mmc": STORAGE_CONTROLLER_MMC,
+ }
+)
+
+func (sc StorageController) String() string {
+ return storageControllerString[sc]
+}
+
+func (sc *StorageController) UnmarshalJSON(b []byte) error {
+ var s string
+ if err := json.Unmarshal(b, &s); err != nil {
+ return err
+ }
+ key := strings.ToLower(s)
+ val, ok := stringStorageController[key]
+ if !ok {
+ return fmt.Errorf("unknown storage controller: %q", key)
+ }
+ *sc = val
+ return nil
+}
+
+// NOTE(jaypipes): since serialized output is as "official" as we're going to
+// get, let's lowercase the string output when serializing, in order to
+// "normalize" the expected serialized output
+func (sc StorageController) MarshalJSON() ([]byte, error) {
+ return []byte(strconv.Quote(strings.ToLower(sc.String()))), nil
+}
+
+// Disk describes a single disk drive on the host system. Disk drives provide
+// raw block storage resources.
+type Disk struct {
+ Name string `json:"name"`
+ SizeBytes uint64 `json:"size_bytes"`
+ PhysicalBlockSizeBytes uint64 `json:"physical_block_size_bytes"`
+ DriveType DriveType `json:"drive_type"`
+ IsRemovable bool `json:"removable"`
+ StorageController StorageController `json:"storage_controller"`
+ BusPath string `json:"bus_path"`
+ // TODO(jaypipes): Convert this to a TopologyNode struct pointer and then
+ // add to serialized output as "numa_node,omitempty"
+ NUMANodeID int `json:"-"`
+ Vendor string `json:"vendor"`
+ Model string `json:"model"`
+ SerialNumber string `json:"serial_number"`
+ WWN string `json:"wwn"`
+ Partitions []*Partition `json:"partitions"`
+ // TODO(jaypipes): Add PCI field for accessing PCI device information
+ // PCI *PCIDevice `json:"pci"`
+}
+
+// Partition describes a logical division of a Disk.
+type Partition struct {
+ Disk *Disk `json:"-"`
+ Name string `json:"name"`
+ Label string `json:"label"`
+ MountPoint string `json:"mount_point"`
+ SizeBytes uint64 `json:"size_bytes"`
+ Type string `json:"type"`
+ IsReadOnly bool `json:"read_only"`
+ UUID string `json:"uuid"` // This would be volume UUID on macOS, PartUUID on linux, empty on Windows
+}
+
+// Info describes all disk drives and partitions in the host system.
+type Info struct {
+ ctx *context.Context
+ // TODO(jaypipes): Deprecate this field and replace with TotalSizeBytes
+ TotalPhysicalBytes uint64 `json:"total_size_bytes"`
+ Disks []*Disk `json:"disks"`
+ Partitions []*Partition `json:"-"`
+}
+
+// New returns a pointer to an Info struct that describes the block storage
+// resources of the host system.
+func New(opts ...*option.Option) (*Info, error) {
+ ctx := context.New(opts...)
+ info := &Info{ctx: ctx}
+ if err := ctx.Do(info.load); err != nil {
+ return nil, err
+ }
+ return info, nil
+}
+
+func (i *Info) String() string {
+ tpbs := util.UNKNOWN
+ if i.TotalPhysicalBytes > 0 {
+ tpb := i.TotalPhysicalBytes
+ unit, unitStr := unitutil.AmountString(int64(tpb))
+ tpb = uint64(math.Ceil(float64(tpb) / float64(unit)))
+ tpbs = fmt.Sprintf("%d%s", tpb, unitStr)
+ }
+ dplural := "disks"
+ if len(i.Disks) == 1 {
+ dplural = "disk"
+ }
+ return fmt.Sprintf("block storage (%d %s, %s physical storage)",
+ len(i.Disks), dplural, tpbs)
+}
+
+func (d *Disk) String() string {
+ sizeStr := util.UNKNOWN
+ if d.SizeBytes > 0 {
+ size := d.SizeBytes
+ unit, unitStr := unitutil.AmountString(int64(size))
+ size = uint64(math.Ceil(float64(size) / float64(unit)))
+ sizeStr = fmt.Sprintf("%d%s", size, unitStr)
+ }
+ atNode := ""
+ if d.NUMANodeID >= 0 {
+ atNode = fmt.Sprintf(" (node #%d)", d.NUMANodeID)
+ }
+ vendor := ""
+ if d.Vendor != "" {
+ vendor = " vendor=" + d.Vendor
+ }
+ model := ""
+ if d.Model != util.UNKNOWN {
+ model = " model=" + d.Model
+ }
+ serial := ""
+ if d.SerialNumber != util.UNKNOWN {
+ serial = " serial=" + d.SerialNumber
+ }
+ wwn := ""
+ if d.WWN != util.UNKNOWN {
+ wwn = " WWN=" + d.WWN
+ }
+ removable := ""
+ if d.IsRemovable {
+ removable = " removable=true"
+ }
+ return fmt.Sprintf(
+ "%s %s (%s) %s [@%s%s]%s",
+ d.Name,
+ d.DriveType.String(),
+ sizeStr,
+ d.StorageController.String(),
+ d.BusPath,
+ atNode,
+ util.ConcatStrings(
+ vendor,
+ model,
+ serial,
+ wwn,
+ removable,
+ ),
+ )
+}
+
+func (p *Partition) String() string {
+ typeStr := ""
+ if p.Type != "" {
+ typeStr = fmt.Sprintf("[%s]", p.Type)
+ }
+ mountStr := ""
+ if p.MountPoint != "" {
+ mountStr = fmt.Sprintf(" mounted@%s", p.MountPoint)
+ }
+ sizeStr := util.UNKNOWN
+ if p.SizeBytes > 0 {
+ size := p.SizeBytes
+ unit, unitStr := unitutil.AmountString(int64(size))
+ size = uint64(math.Ceil(float64(size) / float64(unit)))
+ sizeStr = fmt.Sprintf("%d%s", size, unitStr)
+ }
+ return fmt.Sprintf(
+ "%s (%s) %s%s",
+ p.Name,
+ sizeStr,
+ typeStr,
+ mountStr,
+ )
+}
+
+// simple private struct used to encapsulate block information in a top-level
+// "block" YAML/JSON map/object key
+type blockPrinter struct {
+ Info *Info `json:"block" yaml:"block"`
+}
+
+// YAMLString returns a string with the block information formatted as YAML
+// under a top-level "block:" key
+func (i *Info) YAMLString() string {
+ return marshal.SafeYAML(i.ctx, blockPrinter{i})
+}
+
+// JSONString returns a string with the block information formatted as JSON
+// under a top-level "block:" key
+func (i *Info) JSONString(indent bool) string {
+ return marshal.SafeJSON(i.ctx, blockPrinter{i}, indent)
+}
diff --git a/vendor/github.com/jaypipes/ghw/pkg/block/block_darwin.go b/vendor/github.com/jaypipes/ghw/pkg/block/block_darwin.go
new file mode 100644
index 0000000000..5115d404ba
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/pkg/block/block_darwin.go
@@ -0,0 +1,287 @@
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+//
+
+package block
+
+import (
+ "fmt"
+ "os"
+ "os/exec"
+ "path"
+ "strings"
+
+ "github.com/pkg/errors"
+ "howett.net/plist"
+)
+
+type diskOrPartitionPlistNode struct {
+ Content string
+ DeviceIdentifier string
+ DiskUUID string
+ VolumeName string
+ VolumeUUID string
+ Size int64
+ MountPoint string
+ Partitions []diskOrPartitionPlistNode
+ APFSVolumes []diskOrPartitionPlistNode
+}
+
+type diskUtilListPlist struct {
+ AllDisks []string
+ AllDisksAndPartitions []diskOrPartitionPlistNode
+ VolumesFromDisks []string
+ WholeDisks []string
+}
+
+type diskUtilInfoPlist struct {
+ AESHardware bool // true
+ Bootable bool // true
+ BooterDeviceIdentifier string // disk1s2
+ BusProtocol string // PCI-Express
+ CanBeMadeBootable bool // false
+ CanBeMadeBootableRequiresDestroy bool // false
+ Content string // some-uuid-foo-bar
+ DeviceBlockSize int64 // 4096
+ DeviceIdentifier string // disk1s1
+ DeviceNode string // /dev/disk1s1
+ DeviceTreePath string // IODeviceTree:/PCI0@0/RP17@1B/ANS2@0/AppleANS2Controller
+ DiskUUID string // some-uuid-foo-bar
+ Ejectable bool // false
+ EjectableMediaAutomaticUnderSoftwareControl bool // false
+ EjectableOnly bool // false
+ FilesystemName string // APFS
+ FilesystemType string // apfs
+ FilesystemUserVisibleName string // APFS
+ FreeSpace int64 // 343975677952
+ GlobalPermissionsEnabled bool // true
+ IOKitSize int64 // 499963174912
+ IORegistryEntryName string // Macintosh HD
+ Internal bool // true
+ MediaName string //
+ MediaType string // Generic
+ MountPoint string // /
+ ParentWholeDisk string // disk1
+ PartitionMapPartition bool // false
+ RAIDMaster bool // false
+ RAIDSlice bool // false
+ RecoveryDeviceIdentifier string // disk1s3
+ Removable bool // false
+ RemovableMedia bool // false
+ RemovableMediaOrExternalDevice bool // false
+ SMARTStatus string // Verified
+ Size int64 // 499963174912
+ SolidState bool // true
+ SupportsGlobalPermissionsDisable bool // true
+ SystemImage bool // false
+ TotalSize int64 // 499963174912
+ VolumeAllocationBlockSize int64 // 4096
+ VolumeName string // Macintosh HD
+ VolumeSize int64 // 499963174912
+ VolumeUUID string // some-uuid-foo-bar
+ WholeDisk bool // false
+ Writable bool // true
+ WritableMedia bool // true
+ WritableVolume bool // true
+ // also has a SMARTDeviceSpecificKeysMayVaryNotGuaranteed dict with various info
+ // NOTE: VolumeUUID sometimes == DiskUUID, but not always. So far Content is always a different UUID.
+}
+
+type ioregPlist struct {
+ // there's a lot more than just this...
+ ModelNumber string `plist:"Model Number"`
+ SerialNumber string `plist:"Serial Number"`
+ VendorName string `plist:"Vendor Name"`
+}
+
+func getDiskUtilListPlist() (*diskUtilListPlist, error) {
+ out, err := exec.Command("diskutil", "list", "-plist").Output()
+ if err != nil {
+ return nil, errors.Wrap(err, "diskutil list failed")
+ }
+
+ var data diskUtilListPlist
+ if _, err := plist.Unmarshal(out, &data); err != nil {
+ return nil, errors.Wrap(err, "diskutil list plist unmarshal failed")
+ }
+
+ return &data, nil
+}
+
+func getDiskUtilInfoPlist(device string) (*diskUtilInfoPlist, error) {
+ out, err := exec.Command("diskutil", "info", "-plist", device).Output()
+ if err != nil {
+ return nil, errors.Wrapf(err, "diskutil info for %q failed", device)
+ }
+
+ var data diskUtilInfoPlist
+ if _, err := plist.Unmarshal(out, &data); err != nil {
+ return nil, errors.Wrapf(err, "diskutil info plist unmarshal for %q failed", device)
+ }
+
+ return &data, nil
+}
+
+func getIoregPlist(ioDeviceTreePath string) (*ioregPlist, error) {
+ name := path.Base(ioDeviceTreePath)
+
+ args := []string{
+ "ioreg",
+ "-a", // use XML output
+ "-d", "1", // limit device tree output depth to root node
+ "-r", // root device tree at matched node
+ "-n", name, // match by name
+ }
+ out, err := exec.Command(args[0], args[1:]...).Output()
+ if err != nil {
+ return nil, errors.Wrapf(err, "ioreg query for %q failed", ioDeviceTreePath)
+ }
+ if len(out) == 0 {
+ return nil, nil
+ }
+
+ var data []ioregPlist
+ if _, err := plist.Unmarshal(out, &data); err != nil {
+ return nil, errors.Wrapf(err, "ioreg unmarshal for %q failed", ioDeviceTreePath)
+ }
+ if len(data) != 1 {
+ err := errors.Errorf("ioreg unmarshal resulted in %d I/O device tree nodes (expected 1)", len(data))
+ return nil, err
+ }
+
+ return &data[0], nil
+}
+
+func makePartition(disk, s diskOrPartitionPlistNode, isAPFS bool) (*Partition, error) {
+ if s.Size < 0 {
+ return nil, errors.Errorf("invalid size %q of partition %q", s.Size, s.DeviceIdentifier)
+ }
+
+ var partType string
+ if isAPFS {
+ partType = "APFS Volume"
+ } else {
+ partType = s.Content
+ }
+
+ info, err := getDiskUtilInfoPlist(s.DeviceIdentifier)
+ if err != nil {
+ return nil, err
+ }
+
+ return &Partition{
+ Disk: nil, // filled in later
+ Name: s.DeviceIdentifier,
+ Label: s.VolumeName,
+ MountPoint: s.MountPoint,
+ SizeBytes: uint64(s.Size),
+ Type: partType,
+ IsReadOnly: !info.WritableVolume,
+ UUID: s.VolumeUUID,
+ }, nil
+}
+
+// driveTypeFromPlist looks at the supplied property list struct and attempts to
+// determine the disk type
+func driveTypeFromPlist(infoPlist *diskUtilInfoPlist) DriveType {
+ dt := DRIVE_TYPE_HDD
+ if infoPlist.SolidState {
+ dt = DRIVE_TYPE_SSD
+ }
+ // TODO(jaypipes): Figure out how to determine floppy and/or CD/optical
+ // drive type on Mac
+ return dt
+}
+
+// storageControllerFromPlist looks at the supplied property list struct and
+// attempts to determine the storage controller in use for the device
+func storageControllerFromPlist(infoPlist *diskUtilInfoPlist) StorageController {
+ sc := STORAGE_CONTROLLER_SCSI
+ if strings.HasSuffix(infoPlist.DeviceTreePath, "IONVMeController") {
+ sc = STORAGE_CONTROLLER_NVME
+ }
+ // TODO(jaypipes): I don't know if Mac even supports IDE controllers and
+ // the "virtio" controller is libvirt-specific
+ return sc
+}
+
+func (info *Info) load() error {
+ if !info.ctx.EnableTools {
+ return fmt.Errorf("EnableTools=false on darwin disables block support entirely.")
+ }
+
+ listPlist, err := getDiskUtilListPlist()
+ if err != nil {
+ fmt.Fprintln(os.Stderr, err.Error())
+ return err
+ }
+
+ info.TotalPhysicalBytes = 0
+ info.Disks = make([]*Disk, 0, len(listPlist.AllDisksAndPartitions))
+ info.Partitions = []*Partition{}
+
+ for _, disk := range listPlist.AllDisksAndPartitions {
+ if disk.Size < 0 {
+ return errors.Errorf("invalid size %q of disk %q", disk.Size, disk.DeviceIdentifier)
+ }
+
+ infoPlist, err := getDiskUtilInfoPlist(disk.DeviceIdentifier)
+ if err != nil {
+ return err
+ }
+ if infoPlist.DeviceBlockSize < 0 {
+ return errors.Errorf("invalid block size %q of disk %q", infoPlist.DeviceBlockSize, disk.DeviceIdentifier)
+ }
+
+ busPath := strings.TrimPrefix(infoPlist.DeviceTreePath, "IODeviceTree:")
+
+ ioregPlist, err := getIoregPlist(infoPlist.DeviceTreePath)
+ if err != nil {
+ return err
+ }
+ if ioregPlist == nil {
+ continue
+ }
+
+ // The NUMA node & WWN don't seem to be reported by any tools available by default in macOS.
+ diskReport := &Disk{
+ Name: disk.DeviceIdentifier,
+ SizeBytes: uint64(disk.Size),
+ PhysicalBlockSizeBytes: uint64(infoPlist.DeviceBlockSize),
+ DriveType: driveTypeFromPlist(infoPlist),
+ IsRemovable: infoPlist.Removable,
+ StorageController: storageControllerFromPlist(infoPlist),
+ BusPath: busPath,
+ NUMANodeID: -1,
+ Vendor: ioregPlist.VendorName,
+ Model: ioregPlist.ModelNumber,
+ SerialNumber: ioregPlist.SerialNumber,
+ WWN: "",
+ Partitions: make([]*Partition, 0, len(disk.Partitions)+len(disk.APFSVolumes)),
+ }
+
+ for _, partition := range disk.Partitions {
+ part, err := makePartition(disk, partition, false)
+ if err != nil {
+ return err
+ }
+ part.Disk = diskReport
+ diskReport.Partitions = append(diskReport.Partitions, part)
+ }
+ for _, volume := range disk.APFSVolumes {
+ part, err := makePartition(disk, volume, true)
+ if err != nil {
+ return err
+ }
+ part.Disk = diskReport
+ diskReport.Partitions = append(diskReport.Partitions, part)
+ }
+
+ info.TotalPhysicalBytes += uint64(disk.Size)
+ info.Disks = append(info.Disks, diskReport)
+ info.Partitions = append(info.Partitions, diskReport.Partitions...)
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/jaypipes/ghw/pkg/block/block_linux.go b/vendor/github.com/jaypipes/ghw/pkg/block/block_linux.go
new file mode 100644
index 0000000000..a3c40c4186
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/pkg/block/block_linux.go
@@ -0,0 +1,501 @@
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+//
+
+package block
+
+import (
+ "bufio"
+ "io"
+ "io/ioutil"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "strconv"
+ "strings"
+
+ "github.com/jaypipes/ghw/pkg/context"
+ "github.com/jaypipes/ghw/pkg/linuxpath"
+ "github.com/jaypipes/ghw/pkg/util"
+)
+
+const (
+ sectorSize = 512
+)
+
+func (i *Info) load() error {
+ paths := linuxpath.New(i.ctx)
+ i.Disks = disks(i.ctx, paths)
+ var tpb uint64
+ for _, d := range i.Disks {
+ tpb += d.SizeBytes
+ }
+ i.TotalPhysicalBytes = tpb
+ return nil
+}
+
+func diskPhysicalBlockSizeBytes(paths *linuxpath.Paths, disk string) uint64 {
+ // We can find the sector size in Linux by looking at the
+ // /sys/block/$DEVICE/queue/physical_block_size file in sysfs
+ path := filepath.Join(paths.SysBlock, disk, "queue", "physical_block_size")
+ contents, err := ioutil.ReadFile(path)
+ if err != nil {
+ return 0
+ }
+ size, err := strconv.ParseUint(strings.TrimSpace(string(contents)), 10, 64)
+ if err != nil {
+ return 0
+ }
+ return size
+}
+
+func diskSizeBytes(paths *linuxpath.Paths, disk string) uint64 {
+ // We can find the number of 512-byte sectors by examining the contents of
+ // /sys/block/$DEVICE/size and calculating the physical bytes accordingly.
+ path := filepath.Join(paths.SysBlock, disk, "size")
+ contents, err := ioutil.ReadFile(path)
+ if err != nil {
+ return 0
+ }
+ size, err := strconv.ParseUint(strings.TrimSpace(string(contents)), 10, 64)
+ if err != nil {
+ return 0
+ }
+ return size * sectorSize
+}
+
+func diskNUMANodeID(paths *linuxpath.Paths, disk string) int {
+ link, err := os.Readlink(filepath.Join(paths.SysBlock, disk))
+ if err != nil {
+ return -1
+ }
+ // Walk back up the device path, returning the NUMA node ID as soon as a
+ // numa_node entry can be read and parsed at some level of the hierarchy.
+ for partial := link; strings.HasPrefix(partial, "../devices/"); partial = filepath.Dir(partial) {
+ if nodeContents, err := ioutil.ReadFile(filepath.Join(paths.SysBlock, partial, "numa_node")); err == nil {
+ if nodeInt, err := strconv.Atoi(strings.TrimSpace(string(nodeContents))); err == nil {
+ return nodeInt
+ }
+ }
+ }
+ return -1
+}
+
+func diskVendor(paths *linuxpath.Paths, disk string) string {
+ // In Linux, the vendor for a disk device is found in the
+ // /sys/block/$DEVICE/device/vendor file in sysfs
+ path := filepath.Join(paths.SysBlock, disk, "device", "vendor")
+ contents, err := ioutil.ReadFile(path)
+ if err != nil {
+ return util.UNKNOWN
+ }
+ return strings.TrimSpace(string(contents))
+}
+
+// udevInfoDisk gets the udev info for a disk
+func udevInfoDisk(paths *linuxpath.Paths, disk string) (map[string]string, error) {
+ // Get device major:minor numbers
+ devNo, err := ioutil.ReadFile(filepath.Join(paths.SysBlock, disk, "dev"))
+ if err != nil {
+ return nil, err
+ }
+ return udevInfo(paths, string(devNo))
+}
+
+// udevInfoPartition gets the udev info for a partition
+func udevInfoPartition(paths *linuxpath.Paths, disk string, partition string) (map[string]string, error) {
+ // Get device major:minor numbers
+ devNo, err := ioutil.ReadFile(filepath.Join(paths.SysBlock, disk, partition, "dev"))
+ if err != nil {
+ return nil, err
+ }
+ return udevInfo(paths, string(devNo))
+}
+
+func udevInfo(paths *linuxpath.Paths, devNo string) (map[string]string, error) {
+ // Look up block device in udev runtime database
+ udevID := "b" + strings.TrimSpace(devNo)
+ udevBytes, err := ioutil.ReadFile(filepath.Join(paths.RunUdevData, udevID))
+ if err != nil {
+ return nil, err
+ }
+
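+ // udev data files store device properties one per line in the form
+ // "E:KEY=VALUE" (for example "E:ID_FS_TYPE=ext4"); only these "E:"
+ // property lines are parsed into the returned map.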
+ udevInfo := make(map[string]string)
+ for _, udevLine := range strings.Split(string(udevBytes), "\n") {
+ if strings.HasPrefix(udevLine, "E:") {
+ if s := strings.SplitN(udevLine[2:], "=", 2); len(s) == 2 {
+ udevInfo[s[0]] = s[1]
+ }
+ }
+ }
+ return udevInfo, nil
+}
+
+func diskModel(paths *linuxpath.Paths, disk string) string {
+ info, err := udevInfoDisk(paths, disk)
+ if err != nil {
+ return util.UNKNOWN
+ }
+
+ if model, ok := info["ID_MODEL"]; ok {
+ return model
+ }
+ return util.UNKNOWN
+}
+
+func diskSerialNumber(paths *linuxpath.Paths, disk string) string {
+ info, err := udevInfoDisk(paths, disk)
+ if err != nil {
+ return util.UNKNOWN
+ }
+
+ // There are two serial number keys, ID_SERIAL and ID_SERIAL_SHORT. The
+ // non-_SHORT version often duplicates vendor information collected
+ // elsewhere, so use _SHORT and fall back to ID_SERIAL if missing...
+ if serial, ok := info["ID_SERIAL_SHORT"]; ok {
+ return serial
+ }
+ if serial, ok := info["ID_SERIAL"]; ok {
+ return serial
+ }
+ return util.UNKNOWN
+}
+
+func diskBusPath(paths *linuxpath.Paths, disk string) string {
+ info, err := udevInfoDisk(paths, disk)
+ if err != nil {
+ return util.UNKNOWN
+ }
+
+ // There are two path keys, ID_PATH and ID_PATH_TAG.
+ // The difference seems to be _TAG has funky characters converted to underscores.
+ if path, ok := info["ID_PATH"]; ok {
+ return path
+ }
+ return util.UNKNOWN
+}
+
+func diskWWN(paths *linuxpath.Paths, disk string) string {
+ info, err := udevInfoDisk(paths, disk)
+ if err != nil {
+ return util.UNKNOWN
+ }
+
+ // Trying ID_WWN_WITH_EXTENSION and falling back to ID_WWN is the same logic lsblk uses
+ if wwn, ok := info["ID_WWN_WITH_EXTENSION"]; ok {
+ return wwn
+ }
+ if wwn, ok := info["ID_WWN"]; ok {
+ return wwn
+ }
+ return util.UNKNOWN
+}
+
+// diskPartitions takes the name of a disk (note: *not* the path of the disk,
+// but just the name. In other words, "sda", not "/dev/sda" and "nvme0n1" not
+// "/dev/nvme0n1") and returns a slice of pointers to Partition structs
+// representing the partitions in that disk
+func diskPartitions(ctx *context.Context, paths *linuxpath.Paths, disk string) []*Partition {
+ out := make([]*Partition, 0)
+ path := filepath.Join(paths.SysBlock, disk)
+ files, err := ioutil.ReadDir(path)
+ if err != nil {
+ ctx.Warn("failed to read disk partitions: %s\n", err)
+ return out
+ }
+ for _, file := range files {
+ fname := file.Name()
+ if !strings.HasPrefix(fname, disk) {
+ continue
+ }
+ size := partitionSizeBytes(paths, disk, fname)
+ mp, pt, ro := partitionInfo(paths, fname)
+ du := diskPartUUID(ctx, fname)
+ label := diskPartLabel(paths, disk, fname)
+ if pt == "" {
+ pt = diskPartTypeUdev(paths, disk, fname)
+ }
+ p := &Partition{
+ Name: fname,
+ SizeBytes: size,
+ MountPoint: mp,
+ Type: pt,
+ IsReadOnly: ro,
+ UUID: du,
+ Label: label,
+ }
+ out = append(out, p)
+ }
+ return out
+}
+
+func diskPartLabel(paths *linuxpath.Paths, disk string, partition string) string {
+ info, err := udevInfoPartition(paths, disk, partition)
+ if err != nil {
+ return util.UNKNOWN
+ }
+
+ if label, ok := info["ID_FS_LABEL"]; ok {
+ return label
+ }
+ return util.UNKNOWN
+}
+
+// diskPartTypeUdev gets the partition type from the udev database directly. It is
+// only used as a fallback when the partition is not mounted, in which case we
+// cannot get the type from paths.ProcMounts via the partitionInfo function
+func diskPartTypeUdev(paths *linuxpath.Paths, disk string, partition string) string {
+ info, err := udevInfoPartition(paths, disk, partition)
+ if err != nil {
+ return util.UNKNOWN
+ }
+
+ if pType, ok := info["ID_FS_TYPE"]; ok {
+ return pType
+ }
+ return util.UNKNOWN
+}
+
+func diskPartUUID(ctx *context.Context, part string) string {
+ if !ctx.EnableTools {
+ ctx.Warn("EnableTools=false disables partition UUID detection.")
+ return ""
+ }
+ if !strings.HasPrefix(part, "/dev") {
+ part = "/dev/" + part
+ }
+ args := []string{
+ "blkid",
+ "-s",
+ "PARTUUID",
+ part,
+ }
+ out, err := exec.Command(args[0], args[1:]...).Output()
+ if err != nil {
+ ctx.Warn("failed to read disk partuuid of %s : %s\n", part, err.Error())
+ return ""
+ }
+
+ if len(out) == 0 {
+ return ""
+ }
+
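+ // blkid -s PARTUUID prints a line such as:
+ //   /dev/sda1: PARTUUID="5f4c38a1-01"
+ // (the value shown is illustrative); everything after "PARTUUID=" is the
+ // quoted value, with the quotes stripped below.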
+ parts := strings.Split(string(out), "PARTUUID=")
+ if len(parts) != 2 {
+ ctx.Warn("failed to parse the partuuid of %s\n", part)
+ return ""
+ }
+
+ return strings.ReplaceAll(strings.TrimSpace(parts[1]), `"`, "")
+}
+
+func diskIsRemovable(paths *linuxpath.Paths, disk string) bool {
+ path := filepath.Join(paths.SysBlock, disk, "removable")
+ contents, err := ioutil.ReadFile(path)
+ if err != nil {
+ return false
+ }
+ removable := strings.TrimSpace(string(contents))
+ return removable == "1"
+}
+
+func disks(ctx *context.Context, paths *linuxpath.Paths) []*Disk {
+ // In Linux, we could use the fdisk, lshw or blockdev commands to list disk
+ // information, however all of these utilities require root privileges to
+ // run. We can get all of this information by examining the /sys/block
+ // and /sys/class/block files
+ disks := make([]*Disk, 0)
+ files, err := ioutil.ReadDir(paths.SysBlock)
+ if err != nil {
+ return nil
+ }
+ for _, file := range files {
+ dname := file.Name()
+ if strings.HasPrefix(dname, "loop") {
+ continue
+ }
+
+ driveType, storageController := diskTypes(dname)
+ // TODO(jaypipes): Move this into diskTypes() once abstracting
+ // diskIsRotational for ease of unit testing
+ if !diskIsRotational(ctx, paths, dname) {
+ driveType = DRIVE_TYPE_SSD
+ }
+ size := diskSizeBytes(paths, dname)
+ pbs := diskPhysicalBlockSizeBytes(paths, dname)
+ busPath := diskBusPath(paths, dname)
+ node := diskNUMANodeID(paths, dname)
+ vendor := diskVendor(paths, dname)
+ model := diskModel(paths, dname)
+ serialNo := diskSerialNumber(paths, dname)
+ wwn := diskWWN(paths, dname)
+ removable := diskIsRemovable(paths, dname)
+
+ d := &Disk{
+ Name: dname,
+ SizeBytes: size,
+ PhysicalBlockSizeBytes: pbs,
+ DriveType: driveType,
+ IsRemovable: removable,
+ StorageController: storageController,
+ BusPath: busPath,
+ NUMANodeID: node,
+ Vendor: vendor,
+ Model: model,
+ SerialNumber: serialNo,
+ WWN: wwn,
+ }
+
+ parts := diskPartitions(ctx, paths, dname)
+ // Map this Disk object into the Partition...
+ for _, part := range parts {
+ part.Disk = d
+ }
+ d.Partitions = parts
+
+ disks = append(disks, d)
+ }
+
+ return disks
+}
+
+// diskTypes returns the drive type and storage controller of a disk
+func diskTypes(dname string) (
+ DriveType,
+ StorageController,
+) {
+ // The conditionals below which set the controller and drive type are
+ // based on information listed here:
+ // https://en.wikipedia.org/wiki/Device_file
+ driveType := DRIVE_TYPE_UNKNOWN
+ storageController := STORAGE_CONTROLLER_UNKNOWN
+ if strings.HasPrefix(dname, "fd") {
+ driveType = DRIVE_TYPE_FDD
+ } else if strings.HasPrefix(dname, "sd") {
+ driveType = DRIVE_TYPE_HDD
+ storageController = STORAGE_CONTROLLER_SCSI
+ } else if strings.HasPrefix(dname, "hd") {
+ driveType = DRIVE_TYPE_HDD
+ storageController = STORAGE_CONTROLLER_IDE
+ } else if strings.HasPrefix(dname, "vd") {
+ driveType = DRIVE_TYPE_HDD
+ storageController = STORAGE_CONTROLLER_VIRTIO
+ } else if strings.HasPrefix(dname, "nvme") {
+ driveType = DRIVE_TYPE_SSD
+ storageController = STORAGE_CONTROLLER_NVME
+ } else if strings.HasPrefix(dname, "sr") {
+ driveType = DRIVE_TYPE_ODD
+ storageController = STORAGE_CONTROLLER_SCSI
+ } else if strings.HasPrefix(dname, "xvd") {
+ driveType = DRIVE_TYPE_HDD
+ storageController = STORAGE_CONTROLLER_SCSI
+ } else if strings.HasPrefix(dname, "mmc") {
+ driveType = DRIVE_TYPE_SSD
+ storageController = STORAGE_CONTROLLER_MMC
+ }
+
+ return driveType, storageController
+}
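+
+// For example, diskTypes("nvme0n1") yields (DRIVE_TYPE_SSD,
+// STORAGE_CONTROLLER_NVME) and diskTypes("sda") yields (DRIVE_TYPE_HDD,
+// STORAGE_CONTROLLER_SCSI); disks() above then overrides the drive type to
+// SSD whenever sysfs reports the device as non-rotational.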
+
+func diskIsRotational(ctx *context.Context, paths *linuxpath.Paths, devName string) bool {
+ path := filepath.Join(paths.SysBlock, devName, "queue", "rotational")
+ contents := util.SafeIntFromFile(ctx, path)
+ return contents == 1
+}
+
+// partitionSizeBytes returns the size in bytes of the partition given a disk
+// name and a partition name. Note: disk name and partition name do *not*
+// contain any leading "/dev" parts. In other words, they are *names*, not
+// paths.
+func partitionSizeBytes(paths *linuxpath.Paths, disk string, part string) uint64 {
+ path := filepath.Join(paths.SysBlock, disk, part, "size")
+ contents, err := ioutil.ReadFile(path)
+ if err != nil {
+ return 0
+ }
+ size, err := strconv.ParseUint(strings.TrimSpace(string(contents)), 10, 64)
+ if err != nil {
+ return 0
+ }
+ return size * sectorSize
+}
+
+// Given a full or short partition name, returns the mount point, the type of
+// the partition and whether it's readonly
+func partitionInfo(paths *linuxpath.Paths, part string) (string, string, bool) {
+ // Allow calling PartitionInfo with either the full partition name
+ // "/dev/sda1" or just "sda1"
+ if !strings.HasPrefix(part, "/dev") {
+ part = "/dev/" + part
+ }
+
+ // mount entries for mounted partitions look like this:
+ // /dev/sda6 / ext4 rw,relatime,errors=remount-ro,data=ordered 0 0
+ var r io.ReadCloser
+ r, err := os.Open(paths.ProcMounts)
+ if err != nil {
+ return "", "", true
+ }
+ defer util.SafeClose(r)
+
+ scanner := bufio.NewScanner(r)
+ for scanner.Scan() {
+ line := scanner.Text()
+ entry := parseMountEntry(line)
+ if entry == nil || entry.Partition != part {
+ continue
+ }
+ ro := true
+ for _, opt := range entry.Options {
+ if opt == "rw" {
+ ro = false
+ break
+ }
+ }
+
+ return entry.Mountpoint, entry.FilesystemType, ro
+ }
+ return "", "", true
+}
+
+type mountEntry struct {
+ Partition string
+ Mountpoint string
+ FilesystemType string
+ Options []string
+}
+
+func parseMountEntry(line string) *mountEntry {
+ // mount entries for mounted partitions look like this:
+ // /dev/sda6 / ext4 rw,relatime,errors=remount-ro,data=ordered 0 0
+ if len(line) == 0 || line[0] != '/' {
+ return nil
+ }
+ fields := strings.Fields(line)
+
+ if len(fields) < 4 {
+ return nil
+ }
+
+ // We do some special parsing of the mountpoint, which may contain space,
+ // tab and newline characters, encoded into the mount entry line using their
+ // octal-to-string representations. From the GNU mtab man pages:
+ //
+ // "Therefore these characters are encoded in the files and the getmntent
+ // function takes care of the decoding while reading the entries back in.
+ // '\040' is used to encode a space character, '\011' to encode a tab
+ // character, '\012' to encode a newline character, and '\\' to encode a
+ // backslash."
+ mp := fields[1]
+ r := strings.NewReplacer(
+ "\\011", "\t", "\\012", "\n", "\\040", " ", "\\\\", "\\",
+ )
+ mp = r.Replace(mp)
+
+ res := &mountEntry{
+ Partition: fields[0],
+ Mountpoint: mp,
+ FilesystemType: fields[2],
+ }
+ opts := strings.Split(fields[3], ",")
+ res.Options = opts
+ return res
+}
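+
+// For example, the line "/dev/sda6 / ext4 rw,relatime 0 0" parses to a
+// mountEntry with Partition "/dev/sda6", Mountpoint "/", FilesystemType
+// "ext4" and Options ["rw", "relatime"].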
diff --git a/vendor/github.com/jaypipes/ghw/pkg/block/block_stub.go b/vendor/github.com/jaypipes/ghw/pkg/block/block_stub.go
new file mode 100644
index 0000000000..f5b5164553
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/pkg/block/block_stub.go
@@ -0,0 +1,19 @@
+//go:build !linux && !darwin && !windows
+// +build !linux,!darwin,!windows
+
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+//
+
+package block
+
+import (
+ "runtime"
+
+ "github.com/pkg/errors"
+)
+
+func (i *Info) load() error {
+ return errors.New("blockFillInfo not implemented on " + runtime.GOOS)
+}
diff --git a/vendor/github.com/jaypipes/ghw/pkg/block/block_windows.go b/vendor/github.com/jaypipes/ghw/pkg/block/block_windows.go
new file mode 100644
index 0000000000..75c6f04c7b
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/pkg/block/block_windows.go
@@ -0,0 +1,220 @@
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+//
+
+package block
+
+import (
+ "strings"
+
+ "github.com/StackExchange/wmi"
+
+ "github.com/jaypipes/ghw/pkg/util"
+)
+
+const wqlDiskDrive = "SELECT Caption, CreationClassName, DefaultBlockSize, Description, DeviceID, Index, InterfaceType, Manufacturer, MediaType, Model, Name, Partitions, SerialNumber, Size, TotalCylinders, TotalHeads, TotalSectors, TotalTracks, TracksPerCylinder FROM Win32_DiskDrive"
+
+type win32DiskDrive struct {
+ Caption *string
+ CreationClassName *string
+ DefaultBlockSize *uint64
+ Description *string
+ DeviceID *string
+ Index *uint32 // Used to link with partition
+ InterfaceType *string
+ Manufacturer *string
+ MediaType *string
+ Model *string
+ Name *string
+ Partitions *int32
+ SerialNumber *string
+ Size *uint64
+ TotalCylinders *int64
+ TotalHeads *int32
+ TotalSectors *int64
+ TotalTracks *int64
+ TracksPerCylinder *int32
+}
+
+const wqlDiskPartition = "SELECT Access, BlockSize, Caption, CreationClassName, Description, DeviceID, DiskIndex, Index, Name, Size, SystemName, Type FROM Win32_DiskPartition"
+
+type win32DiskPartition struct {
+ Access *uint16
+ BlockSize *uint64
+ Caption *string
+ CreationClassName *string
+ Description *string
+ DeviceID *string
+ DiskIndex *uint32 // Used to link with Disk Drive
+ Index *uint32
+ Name *string
+ Size *int64
+ SystemName *string
+ Type *string
+}
+
+const wqlLogicalDiskToPartition = "SELECT Antecedent, Dependent FROM Win32_LogicalDiskToPartition"
+
+type win32LogicalDiskToPartition struct {
+ Antecedent *string
+ Dependent *string
+}
+
+const wqlLogicalDisk = "SELECT Caption, CreationClassName, Description, DeviceID, FileSystem, FreeSpace, Name, Size, SystemName FROM Win32_LogicalDisk"
+
+type win32LogicalDisk struct {
+ Caption *string
+ CreationClassName *string
+ Description *string
+ DeviceID *string
+ FileSystem *string
+ FreeSpace *uint64
+ Name *string
+ Size *uint64
+ SystemName *string
+}
+
+func (i *Info) load() error {
+ win32DiskDriveDescriptions, err := getDiskDrives()
+ if err != nil {
+ return err
+ }
+
+ win32DiskPartitionDescriptions, err := getDiskPartitions()
+ if err != nil {
+ return err
+ }
+
+ win32LogicalDiskToPartitionDescriptions, err := getLogicalDisksToPartitions()
+ if err != nil {
+ return err
+ }
+
+ win32LogicalDiskDescriptions, err := getLogicalDisks()
+ if err != nil {
+ return err
+ }
+
+ // Converting into standard structures
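+ // NOTE: the WMI pointer fields are dereferenced without nil checks below,
+ // on the assumption that every selected column is populated for each row.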
+ disks := make([]*Disk, 0)
+ for _, diskdrive := range win32DiskDriveDescriptions {
+ disk := &Disk{
+ Name: strings.TrimSpace(*diskdrive.DeviceID),
+ SizeBytes: *diskdrive.Size,
+ PhysicalBlockSizeBytes: *diskdrive.DefaultBlockSize,
+ DriveType: toDriveType(*diskdrive.MediaType, *diskdrive.Caption),
+ StorageController: toStorageController(*diskdrive.InterfaceType),
+ BusPath: util.UNKNOWN, // TODO: add information
+ NUMANodeID: -1,
+ Vendor: strings.TrimSpace(*diskdrive.Manufacturer),
+ Model: strings.TrimSpace(*diskdrive.Caption),
+ SerialNumber: strings.TrimSpace(*diskdrive.SerialNumber),
+ WWN: util.UNKNOWN, // TODO: add information
+ Partitions: make([]*Partition, 0),
+ }
+ for _, diskpartition := range win32DiskPartitionDescriptions {
+ // Finding disk partitions linked to the current disk drive; compare the
+ // pointed-to index values rather than the pointers themselves
+ if diskdrive.Index != nil && diskpartition.DiskIndex != nil && *diskdrive.Index == *diskpartition.DiskIndex {
+ disk.PhysicalBlockSizeBytes = *diskpartition.BlockSize
+ // Finding logical partition linked to current disk partition
+ for _, logicaldisk := range win32LogicalDiskDescriptions {
+ for _, logicaldisktodiskpartition := range win32LogicalDiskToPartitionDescriptions {
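+ // Win32_LogicalDiskToPartition expresses the association as full
+ // WMI object paths, so rebuild the expected Antecedent/Dependent
+ // strings and match on both ends.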
+ var desiredAntecedent = "\\\\" + *diskpartition.SystemName + "\\root\\cimv2:" + *diskpartition.CreationClassName + ".DeviceID=\"" + *diskpartition.DeviceID + "\""
+ var desiredDependent = "\\\\" + *logicaldisk.SystemName + "\\root\\cimv2:" + *logicaldisk.CreationClassName + ".DeviceID=\"" + *logicaldisk.DeviceID + "\""
+ if *logicaldisktodiskpartition.Antecedent == desiredAntecedent && *logicaldisktodiskpartition.Dependent == desiredDependent {
+ // Appending Partition
+ p := &Partition{
+ Name: strings.TrimSpace(*logicaldisk.Caption),
+ Label: strings.TrimSpace(*logicaldisk.Caption),
+ SizeBytes: *logicaldisk.Size,
+ MountPoint: *logicaldisk.DeviceID,
+ Type: *diskpartition.Type,
+ IsReadOnly: toReadOnly(*diskpartition.Access),
+ UUID: "",
+ }
+ disk.Partitions = append(disk.Partitions, p)
+ break
+ }
+ }
+ }
+ }
+ }
+ disks = append(disks, disk)
+ }
+
+ i.Disks = disks
+ var tpb uint64
+ for _, d := range i.Disks {
+ tpb += d.SizeBytes
+ }
+ i.TotalPhysicalBytes = tpb
+ return nil
+}
+
+func getDiskDrives() ([]win32DiskDrive, error) {
+ // Getting disks drives data from WMI
+ var win3232DiskDriveDescriptions []win32DiskDrive
+ if err := wmi.Query(wqlDiskDrive, &win3232DiskDriveDescriptions); err != nil {
+ return nil, err
+ }
+ return win3232DiskDriveDescriptions, nil
+}
+
+func getDiskPartitions() ([]win32DiskPartition, error) {
+ // Getting disk partitions from WMI
+ var win32DiskPartitionDescriptions []win32DiskPartition
+ if err := wmi.Query(wqlDiskPartition, &win32DiskPartitionDescriptions); err != nil {
+ return nil, err
+ }
+ return win32DiskPartitionDescriptions, nil
+}
+
+func getLogicalDisksToPartitions() ([]win32LogicalDiskToPartition, error) {
+ // Getting links between logical disks and partitions from WMI
+ var win32LogicalDiskToPartitionDescriptions []win32LogicalDiskToPartition
+ if err := wmi.Query(wqlLogicalDiskToPartition, &win32LogicalDiskToPartitionDescriptions); err != nil {
+ return nil, err
+ }
+ return win32LogicalDiskToPartitionDescriptions, nil
+}
+
+func getLogicalDisks() ([]win32LogicalDisk, error) {
+ // Getting logical disks from WMI
+ var win32LogicalDiskDescriptions []win32LogicalDisk
+ if err := wmi.Query(wqlLogicalDisk, &win32LogicalDiskDescriptions); err != nil {
+ return nil, err
+ }
+ return win32LogicalDiskDescriptions, nil
+}
+
+func toDriveType(mediaType string, caption string) DriveType {
+ mediaType = strings.ToLower(mediaType)
+ caption = strings.ToLower(caption)
+ if strings.Contains(mediaType, "fixed") || strings.Contains(mediaType, "ssd") || strings.Contains(caption, "ssd") {
+ return DRIVE_TYPE_SSD
+ } else if strings.Contains(mediaType, "hdd") {
+ return DRIVE_TYPE_HDD
+ }
+ return DRIVE_TYPE_UNKNOWN
+}
+
+// TODO: improve
+func toStorageController(interfaceType string) StorageController {
+ var storageController StorageController
+ switch interfaceType {
+ case "SCSI":
+ storageController = STORAGE_CONTROLLER_SCSI
+ case "IDE":
+ storageController = STORAGE_CONTROLLER_IDE
+ default:
+ storageController = STORAGE_CONTROLLER_UNKNOWN
+ }
+ return storageController
+}
+
+// TODO: improve
+func toReadOnly(access uint16) bool {
+ // See Access property from: https://docs.microsoft.com/en-us/windows/win32/cimwin32prov/win32-diskpartition
+ return access == 0x1
+}
diff --git a/vendor/github.com/jaypipes/ghw/pkg/chassis/chassis.go b/vendor/github.com/jaypipes/ghw/pkg/chassis/chassis.go
new file mode 100644
index 0000000000..a7667bbc23
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/pkg/chassis/chassis.go
@@ -0,0 +1,117 @@
+//
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+//
+
+package chassis
+
+import (
+ "github.com/jaypipes/ghw/pkg/context"
+ "github.com/jaypipes/ghw/pkg/marshal"
+ "github.com/jaypipes/ghw/pkg/option"
+ "github.com/jaypipes/ghw/pkg/util"
+)
+
+var (
+ chassisTypeDescriptions = map[string]string{
+ "1": "Other",
+ "2": "Unknown",
+ "3": "Desktop",
+ "4": "Low profile desktop",
+ "5": "Pizza box",
+ "6": "Mini tower",
+ "7": "Tower",
+ "8": "Portable",
+ "9": "Laptop",
+ "10": "Notebook",
+ "11": "Hand held",
+ "12": "Docking station",
+ "13": "All in one",
+ "14": "Sub notebook",
+ "15": "Space-saving",
+ "16": "Lunch box",
+ "17": "Main server chassis",
+ "18": "Expansion chassis",
+ "19": "SubChassis",
+ "20": "Bus Expansion chassis",
+ "21": "Peripheral chassis",
+ "22": "RAID chassis",
+ "23": "Rack mount chassis",
+ "24": "Sealed-case PC",
+ "25": "Multi-system chassis",
+ "26": "Compact PCI",
+ "27": "Advanced TCA",
+ "28": "Blade",
+ "29": "Blade enclosure",
+ "30": "Tablet",
+ "31": "Convertible",
+ "32": "Detachable",
+ "33": "IoT gateway",
+ "34": "Embedded PC",
+ "35": "Mini PC",
+ "36": "Stick PC",
+ }
+)
+
+// Info defines chassis release information
+type Info struct {
+ ctx *context.Context
+ AssetTag string `json:"asset_tag"`
+ SerialNumber string `json:"serial_number"`
+ Type string `json:"type"`
+ TypeDescription string `json:"type_description"`
+ Vendor string `json:"vendor"`
+ Version string `json:"version"`
+}
+
+func (i *Info) String() string {
+ vendorStr := ""
+ if i.Vendor != "" {
+ vendorStr = " vendor=" + i.Vendor
+ }
+ serialStr := ""
+ if i.SerialNumber != "" && i.SerialNumber != util.UNKNOWN {
+ serialStr = " serial=" + i.SerialNumber
+ }
+ versionStr := ""
+ if i.Version != "" {
+ versionStr = " version=" + i.Version
+ }
+
+ return "chassis type=" + util.ConcatStrings(
+ i.TypeDescription,
+ vendorStr,
+ serialStr,
+ versionStr,
+ )
+}
+
+// New returns a pointer to an Info struct containing information
+// about the host's chassis
+func New(opts ...*option.Option) (*Info, error) {
+ ctx := context.New(opts...)
+ info := &Info{ctx: ctx}
+ if err := ctx.Do(info.load); err != nil {
+ return nil, err
+ }
+ return info, nil
+}
+
+// simple private struct used to encapsulate chassis information in a top-level
+// "chassis" YAML/JSON map/object key
+type chassisPrinter struct {
+ Info *Info `json:"chassis"`
+}
+
+// YAMLString returns a string with the chassis information formatted as YAML
+// under a top-level "chassis:" key
+func (info *Info) YAMLString() string {
+ return marshal.SafeYAML(info.ctx, chassisPrinter{info})
+}
+
+// JSONString returns a string with the chassis information formatted as JSON
+// under a top-level "chassis:" key
+func (info *Info) JSONString(indent bool) string {
+ return marshal.SafeJSON(info.ctx, chassisPrinter{info}, indent)
+}
diff --git a/vendor/github.com/jaypipes/ghw/pkg/chassis/chassis_linux.go b/vendor/github.com/jaypipes/ghw/pkg/chassis/chassis_linux.go
new file mode 100644
index 0000000000..00f64de6e0
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/pkg/chassis/chassis_linux.go
@@ -0,0 +1,26 @@
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+//
+
+package chassis
+
+import (
+ "github.com/jaypipes/ghw/pkg/linuxdmi"
+ "github.com/jaypipes/ghw/pkg/util"
+)
+
+func (i *Info) load() error {
+ i.AssetTag = linuxdmi.Item(i.ctx, "chassis_asset_tag")
+ i.SerialNumber = linuxdmi.Item(i.ctx, "chassis_serial")
+ i.Type = linuxdmi.Item(i.ctx, "chassis_type")
+ typeDesc, found := chassisTypeDescriptions[i.Type]
+ if !found {
+ typeDesc = util.UNKNOWN
+ }
+ i.TypeDescription = typeDesc
+ i.Vendor = linuxdmi.Item(i.ctx, "chassis_vendor")
+ i.Version = linuxdmi.Item(i.ctx, "chassis_version")
+
+ return nil
+}
diff --git a/vendor/github.com/jaypipes/ghw/pkg/chassis/chassis_stub.go b/vendor/github.com/jaypipes/ghw/pkg/chassis/chassis_stub.go
new file mode 100644
index 0000000000..0e3fd94b6c
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/pkg/chassis/chassis_stub.go
@@ -0,0 +1,19 @@
+//go:build !linux && !windows
+// +build !linux,!windows
+
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+//
+
+package chassis
+
+import (
+ "runtime"
+
+ "github.com/pkg/errors"
+)
+
+func (i *Info) load() error {
+ return errors.New("chassisFillInfo not implemented on " + runtime.GOOS)
+}
diff --git a/vendor/github.com/jaypipes/ghw/pkg/chassis/chassis_windows.go b/vendor/github.com/jaypipes/ghw/pkg/chassis/chassis_windows.go
new file mode 100644
index 0000000000..088cbed3cb
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/pkg/chassis/chassis_windows.go
@@ -0,0 +1,43 @@
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+//
+
+package chassis
+
+import (
+ "github.com/StackExchange/wmi"
+
+ "github.com/jaypipes/ghw/pkg/util"
+)
+
+const wqlChassis = "SELECT Caption, Description, Name, Manufacturer, Model, SerialNumber, Tag, TypeDescriptions, Version FROM CIM_Chassis"
+
+type win32Chassis struct {
+ Caption *string
+ Description *string
+ Name *string
+ Manufacturer *string
+ Model *string
+ SerialNumber *string
+ Tag *string
+ TypeDescriptions []string
+ Version *string
+}
+
+func (i *Info) load() error {
+ // Getting data from WMI
+ var win32ChassisDescriptions []win32Chassis
+ if err := wmi.Query(wqlChassis, &win32ChassisDescriptions); err != nil {
+ return err
+ }
+ if len(win32ChassisDescriptions) > 0 {
+ i.AssetTag = *win32ChassisDescriptions[0].Tag
+ i.SerialNumber = *win32ChassisDescriptions[0].SerialNumber
+ i.Type = util.UNKNOWN // TODO:
+ i.TypeDescription = *win32ChassisDescriptions[0].Model
+ i.Vendor = *win32ChassisDescriptions[0].Manufacturer
+ i.Version = *win32ChassisDescriptions[0].Version
+ }
+ return nil
+}
diff --git a/vendor/github.com/jaypipes/ghw/pkg/context/context.go b/vendor/github.com/jaypipes/ghw/pkg/context/context.go
new file mode 100644
index 0000000000..fb8de528c7
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/pkg/context/context.go
@@ -0,0 +1,178 @@
+//
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+//
+
+package context
+
+import (
+ "fmt"
+
+ "github.com/jaypipes/ghw/pkg/option"
+ "github.com/jaypipes/ghw/pkg/snapshot"
+)
+
+// Context contains the merged set of configuration switches that act as an
+// execution context when calling internal discovery methods
+type Context struct {
+ Chroot string
+ EnableTools bool
+ SnapshotPath string
+ SnapshotRoot string
+ SnapshotExclusive bool
+ PathOverrides option.PathOverrides
+ snapshotUnpackedPath string
+ alert option.Alerter
+ err error
+}
+
+// WithContext returns an option.Option that contains a pre-existing Context
+// struct. This is useful for some internal code that sets up snapshots.
+func WithContext(ctx *Context) *option.Option {
+ return &option.Option{
+ Context: ctx,
+ }
+}
+
+// Exists returns true if the supplied (merged) Option already contains
+// a context.
+//
+// TODO(jaypipes): We can get rid of this when we combine the option and
+// context packages, which will make it easier to detect the presence of a
+// pre-setup Context.
+func Exists(opt *option.Option) bool {
+ return opt != nil && opt.Context != nil
+}
+
+// New returns a Context struct pointer that has had various options set on it
+func New(opts ...*option.Option) *Context {
+ merged := option.Merge(opts...)
+ var ctx *Context
+ if merged.Context != nil {
+ var castOK bool
+ ctx, castOK = merged.Context.(*Context)
+ if !castOK {
+ panic("passed in a non-Context for the WithContext() function!")
+ }
+ return ctx
+ }
+ ctx = &Context{
+ alert: option.EnvOrDefaultAlerter(),
+ Chroot: *merged.Chroot,
+ }
+
+ if merged.Snapshot != nil {
+ ctx.SnapshotPath = merged.Snapshot.Path
+ // root is optional, so an extra check is warranted
+ if merged.Snapshot.Root != nil {
+ ctx.SnapshotRoot = *merged.Snapshot.Root
+ }
+ ctx.SnapshotExclusive = merged.Snapshot.Exclusive
+ }
+
+ if merged.Alerter != nil {
+ ctx.alert = merged.Alerter
+ }
+
+ if merged.EnableTools != nil {
+ ctx.EnableTools = *merged.EnableTools
+ }
+
+ if merged.PathOverrides != nil {
+ ctx.PathOverrides = merged.PathOverrides
+ }
+
+ // New is not allowed to return an error - that would break the established
+ // API - so the only way out is to do the checks here, record any error,
+ // and return it later, at the earliest possible occasion, in Setup()
+ if ctx.SnapshotPath != "" && ctx.Chroot != option.DefaultChroot {
+ // The env/client code supplied a value, but we will overwrite it when unpacking snapshots!
+ ctx.err = fmt.Errorf("Conflicting options: chroot %q and snapshot path %q", ctx.Chroot, ctx.SnapshotPath)
+ }
+ return ctx
+}
+
+// FromEnv returns a Context that has been populated from the environs or
+// default options values
+func FromEnv() *Context {
+ chrootVal := option.EnvOrDefaultChroot()
+ enableTools := option.EnvOrDefaultTools()
+ snapPathVal := option.EnvOrDefaultSnapshotPath()
+ snapRootVal := option.EnvOrDefaultSnapshotRoot()
+ snapExclusiveVal := option.EnvOrDefaultSnapshotExclusive()
+ return &Context{
+ Chroot: chrootVal,
+ EnableTools: enableTools,
+ SnapshotPath: snapPathVal,
+ SnapshotRoot: snapRootVal,
+ SnapshotExclusive: snapExclusiveVal,
+ }
+}
+
+// Do wraps a Setup/Teardown pair around the given function
+func (ctx *Context) Do(fn func() error) error {
+ err := ctx.Setup()
+ if err != nil {
+ return err
+ }
+ defer func() {
+ err := ctx.Teardown()
+ if err != nil {
+ ctx.Warn("teardown error: %v", err)
+ }
+ }()
+ return fn()
+}
+
+// Setup prepares the extra optional data a Context may use.
+// `Context`s are ready to use once returned by `New`. Optional features,
+// like snapshot unpacking, may require extra steps. Run `Setup` to perform them.
+// You should call `Setup` just once. It is safe to call `Setup` if you don't make
+// use of optional extra features - `Setup` will do nothing.
+func (ctx *Context) Setup() error {
+ if ctx.err != nil {
+ return ctx.err
+ }
+ if ctx.SnapshotPath == "" {
+ // nothing to do!
+ return nil
+ }
+
+ var err error
+ root := ctx.SnapshotRoot
+ if root == "" {
+ root, err = snapshot.Unpack(ctx.SnapshotPath)
+ if err == nil {
+ ctx.snapshotUnpackedPath = root
+ }
+ } else {
+ var flags uint
+ if ctx.SnapshotExclusive {
+ flags |= snapshot.OwnTargetDirectory
+ }
+ _, err = snapshot.UnpackInto(ctx.SnapshotPath, root, flags)
+ }
+ if err != nil {
+ return err
+ }
+
+ ctx.Chroot = root
+ return nil
+}
+
+// Teardown releases any resource acquired by Setup.
+// You should always call `Teardown` if you called `Setup` to free any resources
+// acquired by `Setup`. Check `Do` for more automated management.
+func (ctx *Context) Teardown() error {
+ if ctx.snapshotUnpackedPath == "" {
+ // if the client code provided the unpack directory,
+ // then it is also in charge of the cleanup.
+ return nil
+ }
+ return snapshot.Cleanup(ctx.snapshotUnpackedPath)
+}
+
+func (ctx *Context) Warn(msg string, args ...interface{}) {
+ ctx.alert.Printf("WARNING: "+msg, args...)
+}
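+
+// Illustrative lifecycle sketch (the WithSnapshot helper and SnapshotOptions
+// type are assumed from the option package; error handling elided):
+//
+//	ctx := context.New(option.WithSnapshot(option.SnapshotOptions{
+//		Path: "/tmp/sysinfo.tgz",
+//	}))
+//	err := ctx.Do(func() error {
+//		// within fn, ctx.Chroot points at the unpacked snapshot root
+//		return nil
+//	})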
diff --git a/vendor/github.com/jaypipes/ghw/pkg/cpu/cpu.go b/vendor/github.com/jaypipes/ghw/pkg/cpu/cpu.go
new file mode 100644
index 0000000000..2fa0cd2d06
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/pkg/cpu/cpu.go
@@ -0,0 +1,169 @@
+//
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+//
+
+package cpu
+
+import (
+ "fmt"
+
+ "github.com/jaypipes/ghw/pkg/context"
+ "github.com/jaypipes/ghw/pkg/marshal"
+ "github.com/jaypipes/ghw/pkg/option"
+)
+
+// ProcessorCore describes a physical host processor core. A processor core is
+// a separate processing unit within some types of central processing units
+// (CPU).
+type ProcessorCore struct {
+ // ID is the identifier that the host gave this core. Note that
+ // this does *not* necessarily equate to a zero-based index of the core
+ // within a physical package. For example, the core IDs for an Intel Core
+ // i7 are 0, 1, 2, 8, 9, and 10
+ ID int `json:"id"`
+ // Index is the zero-based index of the core on the physical processor
+ // package
+ Index int `json:"index"`
+ // NumThreads is the number of hardware threads associated with the core
+ NumThreads uint32 `json:"total_threads"`
+ // LogicalProcessors is a slice of ints representing the logical processor
+ // IDs assigned to any processing unit for the core
+ LogicalProcessors []int `json:"logical_processors"`
+}
+
+// String returns a short string indicating important information about the
+// processor core
+func (c *ProcessorCore) String() string {
+ return fmt.Sprintf(
+ "processor core #%d (%d threads), logical processors %v",
+ c.Index,
+ c.NumThreads,
+ c.LogicalProcessors,
+ )
+}
+
+// Processor describes a physical host central processing unit (CPU).
+type Processor struct {
+ // ID is the physical processor ID according to the system
+ ID int `json:"id"`
+ // NumCores is the number of physical cores in the processor package
+ NumCores uint32 `json:"total_cores"`
+ // NumThreads is the number of hardware threads in the processor package
+ NumThreads uint32 `json:"total_threads"`
+ // Vendor is a string containing the vendor name
+ Vendor string `json:"vendor"`
+ // Model is a string containing the vendor's model name
+ Model string `json:"model"`
+ // Capabilities is a slice of strings indicating the features the processor
+ // has enabled
+ Capabilities []string `json:"capabilities"`
+ // Cores is a slice of ProcessorCore struct pointers that are packed onto
+ // this physical processor
+ Cores []*ProcessorCore `json:"cores"`
+}
+
+// HasCapability returns true if the Processor has the supplied cpuid
+// capability, false otherwise. Examples of cpuid capabilities would be 'vmx'
+// or 'sse4_2'. To see a list of potential cpuid capabilities, see the section on
+// CPUID feature bits in the following article:
+//
+// https://en.wikipedia.org/wiki/CPUID
+func (p *Processor) HasCapability(find string) bool {
+ for _, c := range p.Capabilities {
+ if c == find {
+ return true
+ }
+ }
+ return false
+}
+
+// String returns a short string describing the Processor
+func (p *Processor) String() string {
+ ncs := "cores"
+ if p.NumCores == 1 {
+ ncs = "core"
+ }
+ nts := "threads"
+ if p.NumThreads == 1 {
+ nts = "thread"
+ }
+ return fmt.Sprintf(
+ "physical package #%d (%d %s, %d hardware %s)",
+ p.ID,
+ p.NumCores,
+ ncs,
+ p.NumThreads,
+ nts,
+ )
+}
+
+// Info describes all central processing unit (CPU) functionality on a host.
+// Returned by the `ghw.CPU()` function.
+type Info struct {
+ ctx *context.Context
+ // TotalCores is the total number of physical cores the host system
+ // contains
+ TotalCores uint32 `json:"total_cores"`
+ // TotalThreads is the total number of hardware threads the host system
+ // contains
+ TotalThreads uint32 `json:"total_threads"`
+ // Processors is a slice of Processor struct pointers, one for each
+ // physical processor package contained in the host
+ Processors []*Processor `json:"processors"`
+}
+
+// New returns a pointer to an Info struct that contains information about the
+// CPUs on the host system
+func New(opts ...*option.Option) (*Info, error) {
+ ctx := context.New(opts...)
+ info := &Info{ctx: ctx}
+ if err := ctx.Do(info.load); err != nil {
+ return nil, err
+ }
+ return info, nil
+}
+
+// String returns a short string indicating a summary of CPU information
+func (i *Info) String() string {
+ nps := "packages"
+ if len(i.Processors) == 1 {
+ nps = "package"
+ }
+ ncs := "cores"
+ if i.TotalCores == 1 {
+ ncs = "core"
+ }
+ nts := "threads"
+ if i.TotalThreads == 1 {
+ nts = "thread"
+ }
+ return fmt.Sprintf(
+ "cpu (%d physical %s, %d %s, %d hardware %s)",
+ len(i.Processors),
+ nps,
+ i.TotalCores,
+ ncs,
+ i.TotalThreads,
+ nts,
+ )
+}
+
+// simple private struct used to encapsulate cpu information in a top-level
+// "cpu" YAML/JSON map/object key
+type cpuPrinter struct {
+ Info *Info `json:"cpu"`
+}
+
+// YAMLString returns a string with the cpu information formatted as YAML
+// under a top-level "cpu:" key
+func (i *Info) YAMLString() string {
+ return marshal.SafeYAML(i.ctx, cpuPrinter{i})
+}
+
+// JSONString returns a string with the cpu information formatted as JSON
+// under a top-level "cpu:" key
+func (i *Info) JSONString(indent bool) string {
+ return marshal.SafeJSON(i.ctx, cpuPrinter{i}, indent)
+}
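+
+// Sketch of typical consumption of this package (illustrative only; error
+// handling elided):
+//
+//	cpu, err := cpu.New()
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Println(cpu.String())
+//	for _, proc := range cpu.Processors {
+//		if proc.HasCapability("vmx") {
+//			fmt.Printf("%s supports VT-x\n", proc.Model)
+//		}
+//	}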
diff --git a/vendor/github.com/jaypipes/ghw/pkg/cpu/cpu_linux.go b/vendor/github.com/jaypipes/ghw/pkg/cpu/cpu_linux.go
new file mode 100644
index 0000000000..44e4ced745
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/pkg/cpu/cpu_linux.go
@@ -0,0 +1,220 @@
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+//
+
+package cpu
+
+import (
+ "bufio"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "strconv"
+ "strings"
+
+ "github.com/jaypipes/ghw/pkg/context"
+ "github.com/jaypipes/ghw/pkg/linuxpath"
+ "github.com/jaypipes/ghw/pkg/util"
+)
+
+func (i *Info) load() error {
+ i.Processors = processorsGet(i.ctx)
+ var totCores uint32
+ var totThreads uint32
+ for _, p := range i.Processors {
+ totCores += p.NumCores
+ totThreads += p.NumThreads
+ }
+ i.TotalCores = totCores
+ i.TotalThreads = totThreads
+ return nil
+}
+
+func processorsGet(ctx *context.Context) []*Processor {
+ procs := make([]*Processor, 0)
+ paths := linuxpath.New(ctx)
+
+ r, err := os.Open(paths.ProcCpuinfo)
+ if err != nil {
+ return nil
+ }
+ defer util.SafeClose(r)
+
+ // An array of maps of attributes describing the logical processor
+ procAttrs := make([]map[string]string, 0)
+ curProcAttrs := make(map[string]string)
+
+ scanner := bufio.NewScanner(r)
+ for scanner.Scan() {
+ line := strings.TrimSpace(scanner.Text())
+ if line == "" {
+ // Output of /proc/cpuinfo has a blank line to separate logical
+ // processors, so here we collect up all the attributes we've
+ // collected for this logical processor block
+ procAttrs = append(procAttrs, curProcAttrs)
+ // Reset the current set of processor attributes...
+ curProcAttrs = make(map[string]string)
+ continue
+ }
+ parts := strings.Split(line, ":")
+ key := strings.TrimSpace(parts[0])
+ value := strings.TrimSpace(parts[1])
+ curProcAttrs[key] = value
+ }
+
+ // Build a set of physical processor IDs which represent the physical
+ // package of the CPU
+ setPhysicalIDs := make(map[int]bool)
+ for _, attrs := range procAttrs {
+ pid, err := strconv.Atoi(attrs["physical id"])
+ if err != nil {
+ continue
+ }
+ setPhysicalIDs[pid] = true
+ }
+
+ for pid := range setPhysicalIDs {
+ p := &Processor{
+ ID: pid,
+ }
+ // The indexes into the array of attribute maps for each logical
+ // processor within the physical processor
+ lps := make([]int, 0)
+ for x := range procAttrs {
+ lppid, err := strconv.Atoi(procAttrs[x]["physical id"])
+ if err != nil {
+ continue
+ }
+ if pid == lppid {
+ lps = append(lps, x)
+ }
+ }
+ first := procAttrs[lps[0]]
+ p.Model = first["model name"]
+ p.Vendor = first["vendor_id"]
+ numCores, err := strconv.Atoi(first["cpu cores"])
+ if err != nil {
+ continue
+ }
+ p.NumCores = uint32(numCores)
+ numThreads, err := strconv.Atoi(first["siblings"])
+ if err != nil {
+ continue
+ }
+ p.NumThreads = uint32(numThreads)
+
+ // The flags field is a space-separated list of CPU capabilities
+ p.Capabilities = strings.Split(first["flags"], " ")
+
+ cores := make([]*ProcessorCore, 0)
+ for _, lpidx := range lps {
+ lpid, err := strconv.Atoi(procAttrs[lpidx]["processor"])
+ if err != nil {
+ continue
+ }
+ coreID, err := strconv.Atoi(procAttrs[lpidx]["core id"])
+ if err != nil {
+ continue
+ }
+ var core *ProcessorCore
+ for _, c := range cores {
+ if c.ID == coreID {
+ c.LogicalProcessors = append(
+ c.LogicalProcessors,
+ lpid,
+ )
+ c.NumThreads = uint32(len(c.LogicalProcessors))
+ core = c
+ }
+ }
+ if core == nil {
+ coreLps := make([]int, 1)
+ coreLps[0] = lpid
+ core = &ProcessorCore{
+ ID: coreID,
+ Index: len(cores),
+ NumThreads: 1,
+ LogicalProcessors: coreLps,
+ }
+ cores = append(cores, core)
+ }
+ }
+ p.Cores = cores
+ procs = append(procs, p)
+ }
+ return procs
+}
+
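+// CoresForNode returns a slice of ProcessorCore structs describing the
+// physical cores backing the NUMA node with the supplied 0-based identifier.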
+func CoresForNode(ctx *context.Context, nodeID int) ([]*ProcessorCore, error) {
+ // The /sys/devices/system/node/nodeX directory contains a subdirectory
+ // called 'cpuX' for each logical processor assigned to the node. Each of
+ // those subdirectories contains a topology subdirectory which has a
+ // core_id file that indicates the 0-based identifier of the physical core
+ // the logical processor (hardware thread) is on.
+ paths := linuxpath.New(ctx)
+ path := filepath.Join(
+ paths.SysDevicesSystemNode,
+ fmt.Sprintf("node%d", nodeID),
+ )
+ cores := make([]*ProcessorCore, 0)
+
+ findCoreByID := func(coreID int) *ProcessorCore {
+ for _, c := range cores {
+ if c.ID == coreID {
+ return c
+ }
+ }
+
+ c := &ProcessorCore{
+ ID: coreID,
+ Index: len(cores),
+ LogicalProcessors: make([]int, 0),
+ }
+ cores = append(cores, c)
+ return c
+ }
+
+ files, err := ioutil.ReadDir(path)
+ if err != nil {
+ return nil, err
+ }
+ for _, file := range files {
+ filename := file.Name()
+ if !strings.HasPrefix(filename, "cpu") {
+ continue
+ }
+ if filename == "cpumap" || filename == "cpulist" {
+ // There are two files in the node directory that start with 'cpu'
+ // but are not subdirectories ('cpulist' and 'cpumap'). Ignore
+ // these files.
+ continue
+ }
+ // Grab the logical processor ID by cutting the integer from the
+ // /sys/devices/system/node/nodeX/cpuX filename
+ cpuPath := filepath.Join(path, filename)
+ procID, err := strconv.Atoi(filename[3:])
+ if err != nil {
+ _, _ = fmt.Fprintf(
+ os.Stderr,
+ "failed to determine procID from %s. Expected integer after 3rd char.",
+ filename,
+ )
+ continue
+ }
+ coreIDPath := filepath.Join(cpuPath, "topology", "core_id")
+ coreID := util.SafeIntFromFile(ctx, coreIDPath)
+ core := findCoreByID(coreID)
+ core.LogicalProcessors = append(
+ core.LogicalProcessors,
+ procID,
+ )
+ }
+
+ for _, c := range cores {
+ c.NumThreads = uint32(len(c.LogicalProcessors))
+ }
+
+ return cores, nil
+}
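+
+// For reference, the sysfs layout consumed by CoresForNode looks roughly
+// like this (contents illustrative):
+//
+//	/sys/devices/system/node/node0/cpu0/topology/core_id -> 0
+//	/sys/devices/system/node/node0/cpu1/topology/core_id -> 1
+//	/sys/devices/system/node/node0/cpu4/topology/core_id -> 0
+//
+// i.e. logical processors 0 and 4 are hardware threads of the same core 0.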
diff --git a/vendor/github.com/jaypipes/ghw/pkg/cpu/cpu_stub.go b/vendor/github.com/jaypipes/ghw/pkg/cpu/cpu_stub.go
new file mode 100644
index 0000000000..5d07ee4327
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/pkg/cpu/cpu_stub.go
@@ -0,0 +1,19 @@
+//go:build !linux && !windows
+// +build !linux,!windows
+
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+//
+
+package cpu
+
+import (
+ "runtime"
+
+ "github.com/pkg/errors"
+)
+
+func (i *Info) load() error {
+ return errors.New("cpu.Info.load not implemented on " + runtime.GOOS)
+}
diff --git a/vendor/github.com/jaypipes/ghw/pkg/cpu/cpu_windows.go b/vendor/github.com/jaypipes/ghw/pkg/cpu/cpu_windows.go
new file mode 100644
index 0000000000..3de1649872
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/pkg/cpu/cpu_windows.go
@@ -0,0 +1,57 @@
+//go:build !linux
+// +build !linux
+
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+//
+
+package cpu
+
+import (
+ "github.com/StackExchange/wmi"
+)
+
+const wmqlProcessor = "SELECT Manufacturer, Name, NumberOfLogicalProcessors, NumberOfCores FROM Win32_Processor"
+
+type win32Processor struct {
+ Manufacturer *string
+ Name *string
+ NumberOfLogicalProcessors uint32
+ NumberOfCores uint32
+}
+
+func (i *Info) load() error {
+ // Getting info from WMI
+ var win32descriptions []win32Processor
+ if err := wmi.Query(wmqlProcessor, &win32descriptions); err != nil {
+ return err
+ }
+ // Converting into standard structures
+ i.Processors = processorsGet(win32descriptions)
+ var totCores uint32
+ var totThreads uint32
+ for _, p := range i.Processors {
+ totCores += p.NumCores
+ totThreads += p.NumThreads
+ }
+ i.TotalCores = totCores
+ i.TotalThreads = totThreads
+ return nil
+}
+
+func processorsGet(win32descriptions []win32Processor) []*Processor {
+ var procs []*Processor
+ // Converting into standard structures
+ for index, description := range win32descriptions {
+ p := &Processor{
+ ID: index,
+ Model: *description.Name,
+ Vendor: *description.Manufacturer,
+ NumCores: description.NumberOfCores,
+ NumThreads: description.NumberOfLogicalProcessors,
+ }
+ procs = append(procs, p)
+ }
+ return procs
+}
diff --git a/vendor/github.com/jaypipes/ghw/pkg/gpu/gpu.go b/vendor/github.com/jaypipes/ghw/pkg/gpu/gpu.go
new file mode 100644
index 0000000000..65864c7e14
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/pkg/gpu/gpu.go
@@ -0,0 +1,95 @@
+//
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+//
+
+package gpu
+
+import (
+ "fmt"
+
+ "github.com/jaypipes/ghw/pkg/context"
+ "github.com/jaypipes/ghw/pkg/marshal"
+ "github.com/jaypipes/ghw/pkg/option"
+ "github.com/jaypipes/ghw/pkg/pci"
+ "github.com/jaypipes/ghw/pkg/topology"
+)
+
+type GraphicsCard struct {
+ // the PCI address where the graphics card can be found
+ Address string `json:"address"`
+ // The "index" of the card on the bus (generally not useful information,
+ // but might as well include it)
+ Index int `json:"index"`
+ // pointer to a PCIDevice struct that describes the vendor and product
+ // model, etc
+ // TODO(jaypipes): Rename this field to PCI, instead of DeviceInfo
+ DeviceInfo *pci.Device `json:"pci"`
+ // Topology node that the graphics card is affined to. Will be nil if the
+ // architecture is not NUMA.
+ Node *topology.Node `json:"node,omitempty"`
+}
+
+func (card *GraphicsCard) String() string {
+ deviceStr := card.Address
+ if card.DeviceInfo != nil {
+ deviceStr = card.DeviceInfo.String()
+ }
+ nodeStr := ""
+ if card.Node != nil {
+ nodeStr = fmt.Sprintf(" [affined to NUMA node %d]", card.Node.ID)
+ }
+ return fmt.Sprintf(
+ "card #%d %s@%s",
+ card.Index,
+ nodeStr,
+ deviceStr,
+ )
+}
+
+type Info struct {
+ ctx *context.Context
+ GraphicsCards []*GraphicsCard `json:"cards"`
+}
+
+// New returns a pointer to an Info struct that contains information about the
+// graphics cards on the host system
+func New(opts ...*option.Option) (*Info, error) {
+ ctx := context.New(opts...)
+ info := &Info{ctx: ctx}
+ if err := ctx.Do(info.load); err != nil {
+ return nil, err
+ }
+ return info, nil
+}
+
+func (i *Info) String() string {
+ numCardsStr := "cards"
+ if len(i.GraphicsCards) == 1 {
+ numCardsStr = "card"
+ }
+ return fmt.Sprintf(
+ "gpu (%d graphics %s)",
+ len(i.GraphicsCards),
+ numCardsStr,
+ )
+}
+
+// simple private struct used to encapsulate gpu information in a top-level
+// "gpu" YAML/JSON map/object key
+type gpuPrinter struct {
+ Info *Info `json:"gpu"`
+}
+
+// YAMLString returns a string with the gpu information formatted as YAML
+// under a top-level "gpu:" key
+func (i *Info) YAMLString() string {
+ return marshal.SafeYAML(i.ctx, gpuPrinter{i})
+}
+
+// JSONString returns a string with the gpu information formatted as JSON
+// under a top-level "gpu:" key
+func (i *Info) JSONString(indent bool) string {
+ return marshal.SafeJSON(i.ctx, gpuPrinter{i}, indent)
+}
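+
+// Sketch of typical consumption of this package (illustrative only; error
+// handling elided):
+//
+//	gpu, err := gpu.New()
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	for _, card := range gpu.GraphicsCards {
+//		fmt.Printf(" %v\n", card)
+//	}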
diff --git a/vendor/github.com/jaypipes/ghw/pkg/gpu/gpu_linux.go b/vendor/github.com/jaypipes/ghw/pkg/gpu/gpu_linux.go
new file mode 100644
index 0000000000..a2791e86d1
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/pkg/gpu/gpu_linux.go
@@ -0,0 +1,152 @@
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+//
+
+package gpu
+
+import (
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "strconv"
+ "strings"
+
+ "github.com/jaypipes/ghw/pkg/context"
+ "github.com/jaypipes/ghw/pkg/linuxpath"
+ "github.com/jaypipes/ghw/pkg/pci"
+ "github.com/jaypipes/ghw/pkg/topology"
+ "github.com/jaypipes/ghw/pkg/util"
+)
+
+const (
+ _WARN_NO_SYS_CLASS_DRM = `
+/sys/class/drm does not exist on this system (likely the host system is a
+virtual machine or container with no graphics). Therefore,
+GPUInfo.GraphicsCards will be an empty array.
+`
+)
+
+func (i *Info) load() error {
+ // In Linux, each graphics card is listed under the /sys/class/drm
+ // directory as a symbolic link named "cardN", where N is a zero-based
+ // index of the card in the system. "DRM" stands for Direct Rendering
+ // Manager and is the Linux subsystem that is responsible for graphics I/O
+ //
+ // Each card may have multiple symbolic
+ // links in this directory representing the interfaces from the graphics
+ // card over a particular wire protocol (HDMI, DisplayPort, etc). These
+ // symbolic links are named cardN-<INTERFACE_TYPE>-<DISPLAY_INDEX>. For
+ // instance, on one of my local workstations with an NVIDIA GTX 1050ti
+ // graphics card with one HDMI, one DisplayPort, and one DVI interface to
+ // the card, I see the following in /sys/class/drm:
+ //
+ // $ ll /sys/class/drm/
+ // total 0
+ // drwxr-xr-x 2 root root 0 Jul 16 11:50 ./
+ // drwxr-xr-x 75 root root 0 Jul 16 11:50 ../
+ // lrwxrwxrwx 1 root root 0 Jul 16 11:50 card0 -> ../../devices/pci0000:00/0000:00:03.0/0000:03:00.0/drm/card0/
+ // lrwxrwxrwx 1 root root 0 Jul 16 11:50 card0-DP-1 -> ../../devices/pci0000:00/0000:00:03.0/0000:03:00.0/drm/card0/card0-DP-1/
+ // lrwxrwxrwx 1 root root 0 Jul 16 11:50 card0-DVI-D-1 -> ../../devices/pci0000:00/0000:00:03.0/0000:03:00.0/drm/card0/card0-DVI-D-1/
+ // lrwxrwxrwx 1 root root 0 Jul 16 11:50 card0-HDMI-A-1 -> ../../devices/pci0000:00/0000:00:03.0/0000:03:00.0/drm/card0/card0-HDMI-A-1/
+ //
+ // In this routine, we are only interested in the first link (card0), which
+ // we follow to gather information about the actual device from the PCI
+ // subsystem (we query the modalias file of the PCI device's sysfs
+ // directory using the `ghw.PCIInfo.GetDevice()` function).
+ paths := linuxpath.New(i.ctx)
+ links, err := ioutil.ReadDir(paths.SysClassDRM)
+ if err != nil {
+ i.ctx.Warn(_WARN_NO_SYS_CLASS_DRM)
+ return nil
+ }
+ cards := make([]*GraphicsCard, 0)
+ for _, link := range links {
+ lname := link.Name()
+ if !strings.HasPrefix(lname, "card") {
+ continue
+ }
+ if strings.ContainsRune(lname, '-') {
+ continue
+ }
+ // Grab the card's zero-based integer index
+ lnameBytes := []byte(lname)
+ cardIdx, err := strconv.Atoi(string(lnameBytes[4:]))
+ if err != nil {
+ cardIdx = -1
+ }
+
+ // Calculate the card's PCI address by looking at the symbolic link's
+ // target
+ lpath := filepath.Join(paths.SysClassDRM, lname)
+ dest, err := os.Readlink(lpath)
+ if err != nil {
+ continue
+ }
+ pathParts := strings.Split(dest, "/")
+ numParts := len(pathParts)
+ pciAddress := pathParts[numParts-3]
+ card := &GraphicsCard{
+ Address: pciAddress,
+ Index: cardIdx,
+ }
+ cards = append(cards, card)
+ }
+ gpuFillNUMANodes(i.ctx, cards)
+ gpuFillPCIDevice(i.ctx, cards)
+ i.GraphicsCards = cards
+ return nil
+}
+
+// Loops through each GraphicsCard struct and attempts to fill the DeviceInfo
+// attribute with PCI device information
+func gpuFillPCIDevice(ctx *context.Context, cards []*GraphicsCard) {
+ pci, err := pci.New(context.WithContext(ctx))
+ if err != nil {
+ return
+ }
+ for _, card := range cards {
+ if card.DeviceInfo == nil {
+ card.DeviceInfo = pci.GetDevice(card.Address)
+ }
+ }
+}
+
+// Loops through each GraphicsCard struct and find which NUMA node the card is
+// affined to, setting the GraphicsCard.Node field accordingly. If the host
+// system is not a NUMA system, the Node field will be set to nil.
+func gpuFillNUMANodes(ctx *context.Context, cards []*GraphicsCard) {
+ paths := linuxpath.New(ctx)
+ topo, err := topology.New(context.WithContext(ctx))
+ if err != nil {
+ // Problem getting topology information, so just set each card's node
+ // to nil. Note we must not dereference topo here: it is nil whenever
+ // topology.New returns an error.
+ for _, card := range cards {
+ card.Node = nil
+ }
+ return
+ }
+ for _, card := range cards {
+ // Each graphics card on a NUMA system will have a pseudo-file
+ // called /sys/class/drm/card$CARD_INDEX/device/numa_node which
+ // contains the NUMA node that the card is affined to
+ cardIndexStr := strconv.Itoa(card.Index)
+ fpath := filepath.Join(
+ paths.SysClassDRM,
+ "card"+cardIndexStr,
+ "device",
+ "numa_node",
+ )
+ nodeIdx := util.SafeIntFromFile(ctx, fpath)
+ if nodeIdx == -1 {
+ continue
+ }
+ for _, node := range topo.Nodes {
+ if nodeIdx == int(node.ID) {
+ card.Node = node
+ }
+ }
+ }
+}
diff --git a/vendor/github.com/jaypipes/ghw/pkg/gpu/gpu_stub.go b/vendor/github.com/jaypipes/ghw/pkg/gpu/gpu_stub.go
new file mode 100644
index 0000000000..48991ec8e2
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/pkg/gpu/gpu_stub.go
@@ -0,0 +1,19 @@
+//go:build !linux && !windows
+// +build !linux,!windows
+
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+//
+
+package gpu
+
+import (
+ "runtime"
+
+ "github.com/pkg/errors"
+)
+
+func (i *Info) load() error {
+ return errors.New("gpuFillInfo not implemented on " + runtime.GOOS)
+}
diff --git a/vendor/github.com/jaypipes/ghw/pkg/gpu/gpu_windows.go b/vendor/github.com/jaypipes/ghw/pkg/gpu/gpu_windows.go
new file mode 100644
index 0000000000..5fb5428149
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/pkg/gpu/gpu_windows.go
@@ -0,0 +1,131 @@
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+//
+
+package gpu
+
+import (
+ "strings"
+
+ "github.com/StackExchange/wmi"
+ "github.com/jaypipes/pcidb"
+
+ "github.com/jaypipes/ghw/pkg/pci"
+ "github.com/jaypipes/ghw/pkg/util"
+)
+
+const wqlVideoController = "SELECT Caption, CreationClassName, Description, DeviceID, Name, PNPDeviceID, SystemCreationClassName, SystemName, VideoArchitecture, VideoMemoryType, VideoModeDescription, VideoProcessor FROM Win32_VideoController"
+
+type win32VideoController struct {
+ Caption string
+ CreationClassName string
+ Description string
+ DeviceID string
+ Name string
+ PNPDeviceID string
+ SystemCreationClassName string
+ SystemName string
+ VideoArchitecture uint16
+ VideoMemoryType uint16
+ VideoModeDescription string
+ VideoProcessor string
+}
+
+const wqlPnPEntity = "SELECT Caption, CreationClassName, Description, DeviceID, Manufacturer, Name, PNPClass, PNPDeviceID FROM Win32_PnPEntity"
+
+type win32PnPEntity struct {
+ Caption string
+ CreationClassName string
+ Description string
+ DeviceID string
+ Manufacturer string
+ Name string
+ PNPClass string
+ PNPDeviceID string
+}
+
+func (i *Info) load() error {
+ // Getting data from WMI
+ var win32VideoControllerDescriptions []win32VideoController
+ if err := wmi.Query(wqlVideoController, &win32VideoControllerDescriptions); err != nil {
+ return err
+ }
+
+ // Building dynamic WHERE clause with addresses to create a single query collecting all desired data
+ queryAddresses := []string{}
+ for _, description := range win32VideoControllerDescriptions {
+ var queryAddress = strings.Replace(description.PNPDeviceID, "\\", `\\`, -1)
+ queryAddresses = append(queryAddresses, "PNPDeviceID='"+queryAddress+"'")
+ }
+ whereClause := strings.Join(queryAddresses[:], " OR ")
+
+ // Getting data from WMI
+ var win32PnPDescriptions []win32PnPEntity
+ var wqlPnPDevice = wqlPnPEntity + " WHERE " + whereClause
+ if err := wmi.Query(wqlPnPDevice, &win32PnPDescriptions); err != nil {
+ return err
+ }
+
+ // Converting into standard structures
+ cards := make([]*GraphicsCard, 0)
+ for _, description := range win32VideoControllerDescriptions {
+ card := &GraphicsCard{
+ Address: description.DeviceID, // https://stackoverflow.com/questions/32073667/how-do-i-discover-the-pcie-bus-topology-and-slot-numbers-on-the-board
+ Index: 0,
+ DeviceInfo: GetDevice(description.PNPDeviceID, win32PnPDescriptions),
+ }
+ cards = append(cards, card)
+ }
+ i.GraphicsCards = cards
+ return nil
+}
+
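+// GetDevice matches the supplied PNPDeviceID against the given
+// Win32_PnPEntity rows and returns a pci.Device filled with whatever
+// vendor/product information was found, or util.UNKNOWN placeholders.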
+func GetDevice(id string, entities []win32PnPEntity) *pci.Device {
+ // Backslashing the PnP address ID as required by JSON and the WMI query syntax: https://docs.microsoft.com/en-us/windows/win32/wmisdk/where-clause
+ var queryAddress = strings.Replace(id, "\\", `\\`, -1)
+ // Preparing default structure
+ var device = &pci.Device{
+ Address: queryAddress,
+ Vendor: &pcidb.Vendor{
+ ID: util.UNKNOWN,
+ Name: util.UNKNOWN,
+ Products: []*pcidb.Product{},
+ },
+ Subsystem: &pcidb.Product{
+ ID: util.UNKNOWN,
+ Name: util.UNKNOWN,
+ Subsystems: []*pcidb.Product{},
+ },
+ Product: &pcidb.Product{
+ ID: util.UNKNOWN,
+ Name: util.UNKNOWN,
+ Subsystems: []*pcidb.Product{},
+ },
+ Class: &pcidb.Class{
+ ID: util.UNKNOWN,
+ Name: util.UNKNOWN,
+ Subclasses: []*pcidb.Subclass{},
+ },
+ Subclass: &pcidb.Subclass{
+ ID: util.UNKNOWN,
+ Name: util.UNKNOWN,
+ ProgrammingInterfaces: []*pcidb.ProgrammingInterface{},
+ },
+ ProgrammingInterface: &pcidb.ProgrammingInterface{
+ ID: util.UNKNOWN,
+ Name: util.UNKNOWN,
+ },
+ }
+ // If a matching entity is found, copy its data into the standard structure
+ for _, description := range entities {
+ if id == description.PNPDeviceID {
+ device.Vendor.ID = description.Manufacturer
+ device.Vendor.Name = description.Manufacturer
+ device.Product.ID = description.Name
+ device.Product.Name = description.Description
+ break
+ }
+ }
+ return device
+}
diff --git a/vendor/github.com/jaypipes/ghw/pkg/linuxdmi/dmi_linux.go b/vendor/github.com/jaypipes/ghw/pkg/linuxdmi/dmi_linux.go
new file mode 100644
index 0000000000..09398d36c8
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/pkg/linuxdmi/dmi_linux.go
@@ -0,0 +1,29 @@
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+//
+
+package linuxdmi
+
+import (
+ "io/ioutil"
+ "path/filepath"
+ "strings"
+
+ "github.com/jaypipes/ghw/pkg/context"
+ "github.com/jaypipes/ghw/pkg/linuxpath"
+ "github.com/jaypipes/ghw/pkg/util"
+)
+
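+// Item returns the trimmed contents of the DMI id file named by value, e.g.
+// Item(ctx, "chassis_vendor") reads /sys/class/dmi/id/chassis_vendor (modulo
+// chroot and path overrides), falling back to util.UNKNOWN on error.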
+func Item(ctx *context.Context, value string) string {
+ paths := linuxpath.New(ctx)
+ path := filepath.Join(paths.SysClassDMI, "id", value)
+
+ b, err := ioutil.ReadFile(path)
+ if err != nil {
+ ctx.Warn("Unable to read %s: %s\n", value, err)
+ return util.UNKNOWN
+ }
+
+ return strings.TrimSpace(string(b))
+}
diff --git a/vendor/github.com/jaypipes/ghw/pkg/linuxpath/path_linux.go b/vendor/github.com/jaypipes/ghw/pkg/linuxpath/path_linux.go
new file mode 100644
index 0000000000..c5967d6194
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/pkg/linuxpath/path_linux.go
@@ -0,0 +1,115 @@
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+//
+
+package linuxpath
+
+import (
+ "fmt"
+ "path/filepath"
+
+ "github.com/jaypipes/ghw/pkg/context"
+)
+
+// PathRoots holds the roots of all the filesystem subtrees
+// ghw wants to access.
+type PathRoots struct {
+ Etc string
+ Proc string
+ Run string
+ Sys string
+ Var string
+}
+
+// DefaultPathRoots returns the canonical default value for PathRoots
+func DefaultPathRoots() PathRoots {
+ return PathRoots{
+ Etc: "/etc",
+ Proc: "/proc",
+ Run: "/run",
+ Sys: "/sys",
+ Var: "/var",
+ }
+}
+
+// PathRootsFromContext initializes PathRoots from the given Context,
+// allowing overrides of the canonical default paths.
+func PathRootsFromContext(ctx *context.Context) PathRoots {
+ roots := DefaultPathRoots()
+ if pathEtc, ok := ctx.PathOverrides["/etc"]; ok {
+ roots.Etc = pathEtc
+ }
+ if pathProc, ok := ctx.PathOverrides["/proc"]; ok {
+ roots.Proc = pathProc
+ }
+ if pathRun, ok := ctx.PathOverrides["/run"]; ok {
+ roots.Run = pathRun
+ }
+ if pathSys, ok := ctx.PathOverrides["/sys"]; ok {
+ roots.Sys = pathSys
+ }
+ if pathVar, ok := ctx.PathOverrides["/var"]; ok {
+ roots.Var = pathVar
+ }
+ return roots
+}
+
+type Paths struct {
+ VarLog string
+ ProcMeminfo string
+ ProcCpuinfo string
+ ProcMounts string
+ SysKernelMMHugepages string
+ SysBlock string
+ SysDevicesSystemNode string
+ SysDevicesSystemMemory string
+ SysBusPciDevices string
+ SysClassDRM string
+ SysClassDMI string
+ SysClassNet string
+ RunUdevData string
+}
+
+// New returns a new Paths struct containing filepath fields relative to the
+// supplied Context
+func New(ctx *context.Context) *Paths {
+ roots := PathRootsFromContext(ctx)
+ return &Paths{
+ VarLog: filepath.Join(ctx.Chroot, roots.Var, "log"),
+ ProcMeminfo: filepath.Join(ctx.Chroot, roots.Proc, "meminfo"),
+ ProcCpuinfo: filepath.Join(ctx.Chroot, roots.Proc, "cpuinfo"),
+ ProcMounts: filepath.Join(ctx.Chroot, roots.Proc, "self", "mounts"),
+ SysKernelMMHugepages: filepath.Join(ctx.Chroot, roots.Sys, "kernel", "mm", "hugepages"),
+ SysBlock: filepath.Join(ctx.Chroot, roots.Sys, "block"),
+ SysDevicesSystemNode: filepath.Join(ctx.Chroot, roots.Sys, "devices", "system", "node"),
+ SysDevicesSystemMemory: filepath.Join(ctx.Chroot, roots.Sys, "devices", "system", "memory"),
+ SysBusPciDevices: filepath.Join(ctx.Chroot, roots.Sys, "bus", "pci", "devices"),
+ SysClassDRM: filepath.Join(ctx.Chroot, roots.Sys, "class", "drm"),
+ SysClassDMI: filepath.Join(ctx.Chroot, roots.Sys, "class", "dmi"),
+ SysClassNet: filepath.Join(ctx.Chroot, roots.Sys, "class", "net"),
+ RunUdevData: filepath.Join(ctx.Chroot, roots.Run, "udev", "data"),
+ }
+}
+
+func (p *Paths) NodeCPU(nodeID int, lpID int) string {
+ return filepath.Join(
+ p.SysDevicesSystemNode,
+ fmt.Sprintf("node%d", nodeID),
+ fmt.Sprintf("cpu%d", lpID),
+ )
+}
+
+func (p *Paths) NodeCPUCache(nodeID int, lpID int) string {
+ return filepath.Join(
+ p.NodeCPU(nodeID, lpID),
+ "cache",
+ )
+}
+
+func (p *Paths) NodeCPUCacheIndex(nodeID int, lpID int, cacheIndex int) string {
+ return filepath.Join(
+ p.NodeCPUCache(nodeID, lpID),
+ fmt.Sprintf("index%d", cacheIndex),
+ )
+}
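+
+// A quick sketch of how the pieces compose (values illustrative): with
+// ctx.Chroot = "/host" and a PathOverrides entry mapping "/proc" to "/proc2",
+// New(ctx).ProcCpuinfo resolves to "/host/proc2/cpuinfo".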
diff --git a/vendor/github.com/jaypipes/ghw/pkg/marshal/marshal.go b/vendor/github.com/jaypipes/ghw/pkg/marshal/marshal.go
new file mode 100644
index 0000000000..e8f1bbeac9
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/pkg/marshal/marshal.go
@@ -0,0 +1,47 @@
+//
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+//
+
+package marshal
+
+import (
+ "encoding/json"
+
+ "github.com/ghodss/yaml"
+ "github.com/jaypipes/ghw/pkg/context"
+)
+
+// SafeYAML returns a string after marshalling the supplied parameter into YAML
+func SafeYAML(ctx *context.Context, p interface{}) string {
+ b, err := json.Marshal(p)
+ if err != nil {
+ ctx.Warn("error marshalling JSON: %s", err)
+ return ""
+ }
+ yb, err := yaml.JSONToYAML(b)
+ if err != nil {
+ ctx.Warn("error converting JSON to YAML: %s", err)
+ return ""
+ }
+ return string(yb)
+}
+
+// SafeJSON returns a string after marshalling the supplied parameter into
+// JSON. The indent argument triggers pretty/indented formatting of the JSON
+// string
+func SafeJSON(ctx *context.Context, p interface{}, indent bool) string {
+ var b []byte
+ var err error
+ if !indent {
+ b, err = json.Marshal(p)
+ } else {
+ b, err = json.MarshalIndent(&p, "", " ")
+ }
+ if err != nil {
+ ctx.Warn("error marshalling JSON: %s", err)
+ return ""
+ }
+ return string(b)
+}
diff --git a/vendor/github.com/jaypipes/ghw/pkg/memory/memory.go b/vendor/github.com/jaypipes/ghw/pkg/memory/memory.go
new file mode 100644
index 0000000000..bdf1ab1ac4
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/pkg/memory/memory.go
@@ -0,0 +1,88 @@
+//
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+//
+
+package memory
+
+import (
+ "fmt"
+ "math"
+
+ "github.com/jaypipes/ghw/pkg/context"
+ "github.com/jaypipes/ghw/pkg/marshal"
+ "github.com/jaypipes/ghw/pkg/option"
+ "github.com/jaypipes/ghw/pkg/unitutil"
+ "github.com/jaypipes/ghw/pkg/util"
+)
+
+type Module struct {
+ Label string `json:"label"`
+ Location string `json:"location"`
+ SerialNumber string `json:"serial_number"`
+ SizeBytes int64 `json:"size_bytes"`
+ Vendor string `json:"vendor"`
+}
+
+type Area struct {
+ TotalPhysicalBytes int64 `json:"total_physical_bytes"`
+ TotalUsableBytes int64 `json:"total_usable_bytes"`
+ // An array of sizes, in bytes, of memory pages supported in this area
+ SupportedPageSizes []uint64 `json:"supported_page_sizes"`
+ Modules []*Module `json:"modules"`
+}
+
+func (a *Area) String() string {
+ tpbs := util.UNKNOWN
+ if a.TotalPhysicalBytes > 0 {
+ tpb := a.TotalPhysicalBytes
+ unit, unitStr := unitutil.AmountString(tpb)
+ tpb = int64(math.Ceil(float64(a.TotalPhysicalBytes) / float64(unit)))
+ tpbs = fmt.Sprintf("%d%s", tpb, unitStr)
+ }
+ tubs := util.UNKNOWN
+ if a.TotalUsableBytes > 0 {
+ tub := a.TotalUsableBytes
+ unit, unitStr := unitutil.AmountString(tub)
+ tub = int64(math.Ceil(float64(a.TotalUsableBytes) / float64(unit)))
+ tubs = fmt.Sprintf("%d%s", tub, unitStr)
+ }
+ return fmt.Sprintf("memory (%s physical, %s usable)", tpbs, tubs)
+}
+
+type Info struct {
+ ctx *context.Context
+ Area
+}
+
+func New(opts ...*option.Option) (*Info, error) {
+ ctx := context.New(opts...)
+ info := &Info{ctx: ctx}
+ if err := ctx.Do(info.load); err != nil {
+ return nil, err
+ }
+ return info, nil
+}
+
+func (i *Info) String() string {
+ return i.Area.String()
+}
+
+// simple private struct used to encapsulate memory information in a top-level
+// "memory" YAML/JSON map/object key
+type memoryPrinter struct {
+ Info *Info `json:"memory"`
+}
+
+// YAMLString returns a string with the memory information formatted as YAML
+// under a top-level "memory:" key
+func (i *Info) YAMLString() string {
+ return marshal.SafeYAML(i.ctx, memoryPrinter{i})
+}
+
+// JSONString returns a string with the memory information formatted as JSON
+// under a top-level "memory:" key
+func (i *Info) JSONString(indent bool) string {
+ return marshal.SafeJSON(i.ctx, memoryPrinter{i}, indent)
+}
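+
+// Sketch of typical consumption of this package (illustrative only; error
+// handling elided):
+//
+//	mem, err := memory.New()
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Println(mem.String()) // e.g. "memory (24GB physical, 23GB usable)"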
diff --git a/vendor/github.com/jaypipes/ghw/pkg/memory/memory_cache.go b/vendor/github.com/jaypipes/ghw/pkg/memory/memory_cache.go
new file mode 100644
index 0000000000..8bc4074def
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/pkg/memory/memory_cache.go
@@ -0,0 +1,127 @@
+//
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+//
+
+package memory
+
+import (
+ "encoding/json"
+ "fmt"
+ "strconv"
+ "strings"
+
+ "github.com/jaypipes/ghw/pkg/unitutil"
+)
+
+type CacheType int
+
+const (
+ CACHE_TYPE_UNIFIED CacheType = iota
+ CACHE_TYPE_INSTRUCTION
+ CACHE_TYPE_DATA
+)
+
+var (
+ memoryCacheTypeString = map[CacheType]string{
+ CACHE_TYPE_UNIFIED: "Unified",
+ CACHE_TYPE_INSTRUCTION: "Instruction",
+ CACHE_TYPE_DATA: "Data",
+ }
+
+ // NOTE(fromani): the keys are all lowercase and do not match
+ // the keys in the opposite table `memoryCacheTypeString`.
+ // This is done because of the choice we made in
+ // CacheType:MarshalJSON.
+ // We use this table only in UnmarshalJSON, so it should be OK.
+ stringMemoryCacheType = map[string]CacheType{
+ "unified": CACHE_TYPE_UNIFIED,
+ "instruction": CACHE_TYPE_INSTRUCTION,
+ "data": CACHE_TYPE_DATA,
+ }
+)
+
+func (a CacheType) String() string {
+ return memoryCacheTypeString[a]
+}
+
+// NOTE(jaypipes): since serialized output is as "official" as we're going to
+// get, let's lowercase the string output when serializing, in order to
+// "normalize" the expected serialized output
+func (a CacheType) MarshalJSON() ([]byte, error) {
+ return []byte(strconv.Quote(strings.ToLower(a.String()))), nil
+}
+
+func (a *CacheType) UnmarshalJSON(b []byte) error {
+ var s string
+ if err := json.Unmarshal(b, &s); err != nil {
+ return err
+ }
+ key := strings.ToLower(s)
+ val, ok := stringMemoryCacheType[key]
+ if !ok {
+ return fmt.Errorf("unknown memory cache type: %q", key)
+ }
+ *a = val
+ return nil
+}
+
+type SortByCacheLevelTypeFirstProcessor []*Cache
+
+func (a SortByCacheLevelTypeFirstProcessor) Len() int { return len(a) }
+func (a SortByCacheLevelTypeFirstProcessor) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+func (a SortByCacheLevelTypeFirstProcessor) Less(i, j int) bool {
+ if a[i].Level < a[j].Level {
+ return true
+ } else if a[i].Level == a[j].Level {
+ if a[i].Type < a[j].Type {
+ return true
+ } else if a[i].Type == a[j].Type {
+ // NOTE(jaypipes): len(LogicalProcessors) is always >0 and is always
+ // sorted lowest LP ID to highest LP ID
+ return a[i].LogicalProcessors[0] < a[j].LogicalProcessors[0]
+ }
+ }
+ return false
+}
+
+type SortByLogicalProcessorId []uint32
+
+func (a SortByLogicalProcessorId) Len() int { return len(a) }
+func (a SortByLogicalProcessorId) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+func (a SortByLogicalProcessorId) Less(i, j int) bool { return a[i] < a[j] }
+
+type Cache struct {
+ Level uint8 `json:"level"`
+ Type CacheType `json:"type"`
+ SizeBytes uint64 `json:"size_bytes"`
+ // The set of logical processors (hardware threads) that have access to the
+ // cache
+ LogicalProcessors []uint32 `json:"logical_processors"`
+}
+
+func (c *Cache) String() string {
+ sizeKb := c.SizeBytes / uint64(unitutil.KB)
+ typeStr := ""
+ if c.Type == CACHE_TYPE_INSTRUCTION {
+ typeStr = "i"
+ } else if c.Type == CACHE_TYPE_DATA {
+ typeStr = "d"
+ }
+ cacheIDStr := fmt.Sprintf("L%d%s", c.Level, typeStr)
+ processorMapStr := ""
+ if c.LogicalProcessors != nil {
+ lpStrings := make([]string, len(c.LogicalProcessors))
+ for x, lpid := range c.LogicalProcessors {
+ lpStrings[x] = strconv.Itoa(int(lpid))
+ }
+ processorMapStr = " shared with logical processors: " + strings.Join(lpStrings, ",")
+ }
+ return fmt.Sprintf(
+ "%s cache (%d KB)%s",
+ cacheIDStr,
+ sizeKb,
+ processorMapStr,
+ )
+}
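+
+// Serialization sketch: because MarshalJSON lowercases the type name, a
+// Cache such as {Level: 1, Type: CACHE_TYPE_DATA, SizeBytes: 32768} marshals
+// to
+//
+//	{"level":1,"type":"data","size_bytes":32768,"logical_processors":null}
+//
+// and UnmarshalJSON accepts the lowercase form back via the
+// stringMemoryCacheType table above.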
diff --git a/vendor/github.com/jaypipes/ghw/pkg/memory/memory_cache_linux.go b/vendor/github.com/jaypipes/ghw/pkg/memory/memory_cache_linux.go
new file mode 100644
index 0000000000..dfb5c1f1ea
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/pkg/memory/memory_cache_linux.go
@@ -0,0 +1,188 @@
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+//
+
+package memory
+
+import (
+ "errors"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "sort"
+ "strconv"
+ "strings"
+
+ "github.com/jaypipes/ghw/pkg/context"
+ "github.com/jaypipes/ghw/pkg/linuxpath"
+ "github.com/jaypipes/ghw/pkg/unitutil"
+)
+
+func CachesForNode(ctx *context.Context, nodeID int) ([]*Cache, error) {
+ // The /sys/devices/system/node/nodeX directory contains a subdirectory
+ // called 'cpuX' for each logical processor assigned to the node. Each of
+ // those subdirectories contains a 'cache' subdirectory which holds a number
+ // of subdirectories beginning with 'index' and ending in the cache's
+ // internal 0-based identifier. Those subdirectories contain a number of
+ // files, including 'shared_cpu_list', 'size', and 'type' which we use to
+ // determine cache characteristics.
+ paths := linuxpath.New(ctx)
+ path := filepath.Join(
+ paths.SysDevicesSystemNode,
+ fmt.Sprintf("node%d", nodeID),
+ )
+ caches := make(map[string]*Cache)
+
+ files, err := ioutil.ReadDir(path)
+ if err != nil {
+ return nil, err
+ }
+ for _, file := range files {
+ filename := file.Name()
+ if !strings.HasPrefix(filename, "cpu") {
+ continue
+ }
+ if filename == "cpumap" || filename == "cpulist" {
+ // There are two files in the node directory that start with 'cpu'
+ // but are not subdirectories ('cpulist' and 'cpumap'). Ignore
+ // these files.
+ continue
+ }
+ // Grab the logical processor ID by cutting the integer from the
+ // /sys/devices/system/node/nodeX/cpuX filename
+ cpuPath := filepath.Join(path, filename)
+ lpID, _ := strconv.Atoi(filename[3:])
+
+ // Inspect the caches for each logical processor. There will be a
+ // /sys/devices/system/node/nodeX/cpuX/cache directory containing a
+ // number of directories beginning with the prefix "index" followed by
+ // a number. The number indicates the level of the cache, which
+ // indicates the "distance" from the processor. Each of these
+ // directories contains information about the size of that level of
+ // cache and the processors mapped to it.
+ cachePath := filepath.Join(cpuPath, "cache")
+ if _, err = os.Stat(cachePath); errors.Is(err, os.ErrNotExist) {
+ continue
+ }
+ cacheDirFiles, err := ioutil.ReadDir(cachePath)
+ if err != nil {
+ return nil, err
+ }
+ for _, cacheDirFile := range cacheDirFiles {
+ cacheDirFileName := cacheDirFile.Name()
+ if !strings.HasPrefix(cacheDirFileName, "index") {
+ continue
+ }
+ cacheIndex, _ := strconv.Atoi(cacheDirFileName[5:])
+
+ // The cache information is repeated for each node, so here, we
+ // just ensure that we only have one Cache object for each
+ // unique combination of level, type and processor map
+ level := memoryCacheLevel(ctx, paths, nodeID, lpID, cacheIndex)
+ cacheType := memoryCacheType(ctx, paths, nodeID, lpID, cacheIndex)
+ sharedCpuMap := memoryCacheSharedCPUMap(ctx, paths, nodeID, lpID, cacheIndex)
+ cacheKey := fmt.Sprintf("%d-%d-%s", level, cacheType, sharedCpuMap)
+
+ cache, exists := caches[cacheKey]
+ if !exists {
+ size := memoryCacheSize(ctx, paths, nodeID, lpID, cacheIndex)
+ cache = &Cache{
+ Level: uint8(level),
+ Type: cacheType,
+ SizeBytes: uint64(size) * uint64(unitutil.KB),
+ LogicalProcessors: make([]uint32, 0),
+ }
+ caches[cacheKey] = cache
+ }
+ cache.LogicalProcessors = append(
+ cache.LogicalProcessors,
+ uint32(lpID),
+ )
+ }
+ }
+
+ cacheVals := make([]*Cache, len(caches))
+ x := 0
+ for _, c := range caches {
+ // ensure the cache's processor set is sorted by logical process ID
+ sort.Sort(SortByLogicalProcessorId(c.LogicalProcessors))
+ cacheVals[x] = c
+ x++
+ }
+
+ return cacheVals, nil
+}
+
+func memoryCacheLevel(ctx *context.Context, paths *linuxpath.Paths, nodeID int, lpID int, cacheIndex int) int {
+ levelPath := filepath.Join(
+ paths.NodeCPUCacheIndex(nodeID, lpID, cacheIndex),
+ "level",
+ )
+ levelContents, err := ioutil.ReadFile(levelPath)
+ if err != nil {
+ ctx.Warn("%s", err)
+ return -1
+ }
+ // levelContents is now a []byte with the last byte being a newline
+ // character. Trim that off and convert the contents to an integer.
+ level, err := strconv.Atoi(string(levelContents[:len(levelContents)-1]))
+ if err != nil {
+ ctx.Warn("Unable to parse int from %s", levelContents)
+ return -1
+ }
+ return level
+}
+
+func memoryCacheSize(ctx *context.Context, paths *linuxpath.Paths, nodeID int, lpID int, cacheIndex int) int {
+ sizePath := filepath.Join(
+ paths.NodeCPUCacheIndex(nodeID, lpID, cacheIndex),
+ "size",
+ )
+ sizeContents, err := ioutil.ReadFile(sizePath)
+ if err != nil {
+ ctx.Warn("%s", err)
+ return -1
+ }
+ // size comes as XK\n, so we trim off the K and the newline.
+ size, err := strconv.Atoi(string(sizeContents[:len(sizeContents)-2]))
+ if err != nil {
+ ctx.Warn("Unable to parse int from %s", sizeContents)
+ return -1
+ }
+ return size
+}
+
+func memoryCacheType(ctx *context.Context, paths *linuxpath.Paths, nodeID int, lpID int, cacheIndex int) CacheType {
+ typePath := filepath.Join(
+ paths.NodeCPUCacheIndex(nodeID, lpID, cacheIndex),
+ "type",
+ )
+ cacheTypeContents, err := ioutil.ReadFile(typePath)
+ if err != nil {
+ ctx.Warn("%s", err)
+ return CACHE_TYPE_UNIFIED
+ }
+ switch string(cacheTypeContents[:len(cacheTypeContents)-1]) {
+ case "Data":
+ return CACHE_TYPE_DATA
+ case "Instruction":
+ return CACHE_TYPE_INSTRUCTION
+ default:
+ return CACHE_TYPE_UNIFIED
+ }
+}
+
+func memoryCacheSharedCPUMap(ctx *context.Context, paths *linuxpath.Paths, nodeID int, lpID int, cacheIndex int) string {
+ scpuPath := filepath.Join(
+ paths.NodeCPUCacheIndex(nodeID, lpID, cacheIndex),
+ "shared_cpu_map",
+ )
+ sharedCpuMap, err := ioutil.ReadFile(scpuPath)
+ if err != nil {
+ ctx.Warn("%s", err)
+ return ""
+ }
+ return string(sharedCpuMap[:len(sharedCpuMap)-1])
+}
diff --git a/vendor/github.com/jaypipes/ghw/pkg/memory/memory_linux.go b/vendor/github.com/jaypipes/ghw/pkg/memory/memory_linux.go
new file mode 100644
index 0000000000..4b7631a195
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/pkg/memory/memory_linux.go
@@ -0,0 +1,299 @@
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+//
+
+package memory
+
+import (
+ "bufio"
+ "compress/gzip"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "regexp"
+ "strconv"
+ "strings"
+
+ "github.com/jaypipes/ghw/pkg/context"
+ "github.com/jaypipes/ghw/pkg/linuxpath"
+ "github.com/jaypipes/ghw/pkg/unitutil"
+ "github.com/jaypipes/ghw/pkg/util"
+)
+
+const (
+ _WARN_CANNOT_DETERMINE_PHYSICAL_MEMORY = `
+Could not determine total physical bytes of memory. This may
+be due to the host being a virtual machine or container with no
+/var/log/syslog file or /sys/devices/system/memory directory, or
+the current user may not have necessary privileges to read the syslog.
+We are falling back to setting the total physical amount of memory to
+the total usable amount of memory
+`
+)
+
+var (
+ // System log lines will look similar to the following:
+ // ... kernel: [0.000000] Memory: 24633272K/25155024K ...
+ _REGEX_SYSLOG_MEMLINE = regexp.MustCompile(`Memory:\s+\d+K\/(\d+)K`)
+)
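+
+// For example, given the syslog line
+//
+//	kernel: [0.000000] Memory: 24633272K/25155024K available ...
+//
+// the capture group yields "25155024" (total physical KiB), which the
+// syslog fallback below multiplies by 1024 to obtain bytes.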
+
+func (i *Info) load() error {
+ paths := linuxpath.New(i.ctx)
+ tub := memTotalUsableBytes(paths)
+ if tub < 1 {
+ return fmt.Errorf("Could not determine total usable bytes of memory")
+ }
+ i.TotalUsableBytes = tub
+ tpb := memTotalPhysicalBytes(paths)
+ i.TotalPhysicalBytes = tpb
+ if tpb < 1 {
+ i.ctx.Warn(_WARN_CANNOT_DETERMINE_PHYSICAL_MEMORY)
+ i.TotalPhysicalBytes = tub
+ }
+ i.SupportedPageSizes, _ = memorySupportedPageSizes(paths.SysKernelMMHugepages)
+ return nil
+}
+
+func AreaForNode(ctx *context.Context, nodeID int) (*Area, error) {
+ paths := linuxpath.New(ctx)
+ path := filepath.Join(
+ paths.SysDevicesSystemNode,
+ fmt.Sprintf("node%d", nodeID),
+ )
+
+ blockSizeBytes, err := memoryBlockSizeBytes(paths.SysDevicesSystemMemory)
+ if err != nil {
+ return nil, err
+ }
+
+ totPhys, err := memoryTotalPhysicalBytesFromPath(path, blockSizeBytes)
+ if err != nil {
+ return nil, err
+ }
+
+ totUsable, err := memoryTotalUsableBytesFromPath(filepath.Join(path, "meminfo"))
+ if err != nil {
+ return nil, err
+ }
+
+ supportedHP, err := memorySupportedPageSizes(filepath.Join(path, "hugepages"))
+ if err != nil {
+ return nil, err
+ }
+
+ return &Area{
+ TotalPhysicalBytes: totPhys,
+ TotalUsableBytes: totUsable,
+ SupportedPageSizes: supportedHP,
+ }, nil
+}
+
+func memoryBlockSizeBytes(dir string) (uint64, error) {
+ // get the memory block size in byte in hexadecimal notation
+ blockSize := filepath.Join(dir, "block_size_bytes")
+
+ d, err := ioutil.ReadFile(blockSize)
+ if err != nil {
+ return 0, err
+ }
+ return strconv.ParseUint(strings.TrimSpace(string(d)), 16, 64)
+}
+
+func memTotalPhysicalBytes(paths *linuxpath.Paths) (total int64) {
+ defer func() {
+ // fallback to the syslog file approach in case of error
+ if total < 0 {
+ total = memTotalPhysicalBytesFromSyslog(paths)
+ }
+ }()
+
+ // detect physical memory from /sys/devices/system/memory
+ dir := paths.SysDevicesSystemMemory
+ blockSizeBytes, err := memoryBlockSizeBytes(dir)
+ if err != nil {
+ total = -1
+ return total
+ }
+
+ total, err = memoryTotalPhysicalBytesFromPath(dir, blockSizeBytes)
+ if err != nil {
+ total = -1
+ }
+ return total
+}
+
+func memoryTotalPhysicalBytesFromPath(dir string, blockSizeBytes uint64) (int64, error) {
+ // iterate over memory's block /sys/.../memory*,
+ // if the memory block state is 'online' we increment the total
+ // with the memory block size to determine the amount of physical
+ // memory available on this system.
+ // This works for both system-wide:
+ // /sys/devices/system/memory/memory*
+ // and for per-numa-node report:
+ // /sys/devices/system/node/node*/memory*
+
+ sysMemory, err := filepath.Glob(filepath.Join(dir, "memory*"))
+ if err != nil {
+ return -1, err
+ } else if sysMemory == nil {
+ return -1, fmt.Errorf("cannot find memory entries in %q", dir)
+ }
+
+ var total int64
+ for _, path := range sysMemory {
+ s, err := ioutil.ReadFile(filepath.Join(path, "state"))
+ if err != nil {
+ return -1, err
+ }
+ if strings.TrimSpace(string(s)) != "online" {
+ continue
+ }
+ total += int64(blockSizeBytes)
+ }
+ return total, nil
+}
+
+func memTotalPhysicalBytesFromSyslog(paths *linuxpath.Paths) int64 {
+ // In Linux, the total physical memory can be determined by looking at the
+ // output of dmidecode, however dmidecode requires root privileges to run,
+ // so instead we examine the system logs for startup information containing
+ // total physical memory and cache the results of this.
+ findPhysicalKb := func(line string) int64 {
+ matches := _REGEX_SYSLOG_MEMLINE.FindStringSubmatch(line)
+ if len(matches) == 2 {
+ i, err := strconv.Atoi(matches[1])
+ if err != nil {
+ return -1
+ }
+ return int64(i * 1024)
+ }
+ return -1
+ }
+
+ // /var/log will contain a file called syslog and 0 or more files called
+ // syslog.$NUMBER or syslog.$NUMBER.gz containing system log records. We
+ // search each, stopping when we match a system log record line that
+ // contains physical memory information.
+ logDir := paths.VarLog
+ logFiles, err := ioutil.ReadDir(logDir)
+ if err != nil {
+ return -1
+ }
+ for _, file := range logFiles {
+ if strings.HasPrefix(file.Name(), "syslog") {
+ fullPath := filepath.Join(logDir, file.Name())
+ unzip := strings.HasSuffix(file.Name(), ".gz")
+ var r io.ReadCloser
+ r, err = os.Open(fullPath)
+ if err != nil {
+ return -1
+ }
+ defer util.SafeClose(r)
+ if unzip {
+ r, err = gzip.NewReader(r)
+ if err != nil {
+ return -1
+ }
+ }
+
+ scanner := bufio.NewScanner(r)
+ for scanner.Scan() {
+ line := scanner.Text()
+ size := findPhysicalKb(line)
+ if size > 0 {
+ return size
+ }
+ }
+ }
+ }
+ return -1
+}
+
+func memTotalUsableBytes(paths *linuxpath.Paths) int64 {
+ amount, err := memoryTotalUsableBytesFromPath(paths.ProcMeminfo)
+ if err != nil {
+ return -1
+ }
+ return amount
+}
+
+func memoryTotalUsableBytesFromPath(meminfoPath string) (int64, error) {
+ // In Linux, /proc/meminfo or its close relative
+ // /sys/devices/system/node/node*/meminfo
+ // contains a set of memory-related amounts, with
+ // lines looking like the following:
+ //
+ // $ cat /proc/meminfo
+ // MemTotal: 24677596 kB
+ // MemFree: 21244356 kB
+ // MemAvailable: 22085432 kB
+ // ...
+ // HugePages_Total: 0
+ // HugePages_Free: 0
+ // HugePages_Rsvd: 0
+ // HugePages_Surp: 0
+ // ...
+ //
+ // It's worth noting that /proc/meminfo returns exact information, not
+ // "theoretical" information. For instance, on the above system, I have
+ // 24GB of RAM but MemTotal is indicating only around 23GB. This is because
+ // MemTotal contains the exact amount of *usable* memory after accounting
+ // for the kernel's resident memory size and a few reserved bits.
+ // Please note GHW cares about the subset of lines shared between system-wide
+ // and per-NUMA-node meminfos. For more information, see:
+ //
+ // https://www.kernel.org/doc/Documentation/filesystems/proc.txt
+ r, err := os.Open(meminfoPath)
+ if err != nil {
+ return -1, err
+ }
+ defer util.SafeClose(r)
+
+ scanner := bufio.NewScanner(r)
+ for scanner.Scan() {
+ line := scanner.Text()
+ parts := strings.Split(line, ":")
+ key := parts[0]
+ if !strings.Contains(key, "MemTotal") {
+ continue
+ }
+ rawValue := parts[1]
+ inKb := strings.HasSuffix(rawValue, "kB")
+ value, err := strconv.Atoi(strings.TrimSpace(strings.TrimSuffix(rawValue, "kB")))
+ if err != nil {
+ return -1, err
+ }
+ if inKb {
+ value = value * int(unitutil.KB)
+ }
+ return int64(value), nil
+ }
+ return -1, fmt.Errorf("failed to find MemTotal entry in path %q", meminfoPath)
+}
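+
+// Worked example (illustrative): given the meminfo sample shown above, the
+// line "MemTotal:       24677596 kB" yields 24677596 * 1024 = 25269858304
+// bytes. A per-NUMA-node meminfo (e.g. /sys/devices/system/node/node0/meminfo)
+// can be parsed the same way, since its lines also contain a "MemTotal" key.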
+
+func memorySupportedPageSizes(hpDir string) ([]uint64, error) {
+ // In Linux, /sys/kernel/mm/hugepages contains a directory per page size
+ // supported by the kernel. The directory name corresponds to the pattern
+ // 'hugepages-{pagesize}kb'
+ out := make([]uint64, 0)
+
+ files, err := ioutil.ReadDir(hpDir)
+ if err != nil {
+ return out, err
+ }
+ for _, file := range files {
+ parts := strings.Split(file.Name(), "-")
+ sizeStr := parts[1]
+ // Cut off the 'kb'
+ sizeStr = sizeStr[0 : len(sizeStr)-2]
+ size, err := strconv.Atoi(sizeStr)
+ if err != nil {
+ return out, err
+ }
+ out = append(out, uint64(size*int(unitutil.KB)))
+ }
+ return out, nil
+}
diff --git a/vendor/github.com/jaypipes/ghw/pkg/memory/memory_stub.go b/vendor/github.com/jaypipes/ghw/pkg/memory/memory_stub.go
new file mode 100644
index 0000000000..6ce99e00dc
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/pkg/memory/memory_stub.go
@@ -0,0 +1,19 @@
+//go:build !linux && !windows
+// +build !linux,!windows
+
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+//
+
+package memory
+
+import (
+ "runtime"
+
+ "github.com/pkg/errors"
+)
+
+func (i *Info) load() error {
+ return errors.New("mem.Info.load not implemented on " + runtime.GOOS)
+}
diff --git a/vendor/github.com/jaypipes/ghw/pkg/memory/memory_windows.go b/vendor/github.com/jaypipes/ghw/pkg/memory/memory_windows.go
new file mode 100644
index 0000000000..c3a3945ca9
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/pkg/memory/memory_windows.go
@@ -0,0 +1,72 @@
+//
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+//
+
+package memory
+
+import (
+ "github.com/StackExchange/wmi"
+
+ "github.com/jaypipes/ghw/pkg/unitutil"
+)
+
+const wqlOperatingSystem = "SELECT TotalVisibleMemorySize FROM Win32_OperatingSystem"
+
+type win32OperatingSystem struct {
+ TotalVisibleMemorySize *uint64
+}
+
+const wqlPhysicalMemory = "SELECT BankLabel, Capacity, DataWidth, Description, DeviceLocator, Manufacturer, Model, Name, PartNumber, PositionInRow, SerialNumber, Speed, Tag, TotalWidth FROM Win32_PhysicalMemory"
+
+type win32PhysicalMemory struct {
+ BankLabel *string
+ Capacity *uint64
+ DataWidth *uint16
+ Description *string
+ DeviceLocator *string
+ Manufacturer *string
+ Model *string
+ Name *string
+ PartNumber *string
+ PositionInRow *uint32
+ SerialNumber *string
+ Speed *uint32
+ Tag *string
+ TotalWidth *uint16
+}
+
+func (i *Info) load() error {
+ // Getting info from WMI
+ var win32OSDescriptions []win32OperatingSystem
+ if err := wmi.Query(wqlOperatingSystem, &win32OSDescriptions); err != nil {
+ return err
+ }
+ var win32MemDescriptions []win32PhysicalMemory
+ if err := wmi.Query(wqlPhysicalMemory, &win32MemDescriptions); err != nil {
+ return err
+ }
+ // We calculate total physical memory size by summing the DIMM sizes
+ var totalPhysicalBytes uint64
+ i.Modules = make([]*Module, 0, len(win32MemDescriptions))
+ for _, description := range win32MemDescriptions {
+ totalPhysicalBytes += *description.Capacity
+ i.Modules = append(i.Modules, &Module{
+ Label: *description.BankLabel,
+ Location: *description.DeviceLocator,
+ SerialNumber: *description.SerialNumber,
+ SizeBytes: int64(*description.Capacity),
+ Vendor: *description.Manufacturer,
+ })
+ }
+ var totalUsableBytes uint64
+ for _, description := range win32OSDescriptions {
+ // TotalVisibleMemorySize is the amount of memory available for use by
+ // the operating system, **in kilobytes**
+ totalUsableBytes += *description.TotalVisibleMemorySize * uint64(unitutil.KB)
+ }
+ i.TotalUsableBytes = int64(totalUsableBytes)
+ i.TotalPhysicalBytes = int64(totalPhysicalBytes)
+ return nil
+}
diff --git a/vendor/github.com/jaypipes/ghw/pkg/net/net.go b/vendor/github.com/jaypipes/ghw/pkg/net/net.go
new file mode 100644
index 0000000000..8994d112ec
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/pkg/net/net.go
@@ -0,0 +1,83 @@
+//
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+//
+
+package net
+
+import (
+ "fmt"
+
+ "github.com/jaypipes/ghw/pkg/context"
+ "github.com/jaypipes/ghw/pkg/marshal"
+ "github.com/jaypipes/ghw/pkg/option"
+)
+
+type NICCapability struct {
+ Name string `json:"name"`
+ IsEnabled bool `json:"is_enabled"`
+ CanEnable bool `json:"can_enable"`
+}
+
+type NIC struct {
+ Name string `json:"name"`
+ MacAddress string `json:"mac_address"`
+ IsVirtual bool `json:"is_virtual"`
+ Capabilities []*NICCapability `json:"capabilities"`
+ PCIAddress *string `json:"pci_address,omitempty"`
+ // TODO(fromani): add other hw addresses (USB) when we support them
+}
+
+func (n *NIC) String() string {
+ isVirtualStr := ""
+ if n.IsVirtual {
+ isVirtualStr = " (virtual)"
+ }
+ return fmt.Sprintf(
+ "%s%s",
+ n.Name,
+ isVirtualStr,
+ )
+}
+
+type Info struct {
+ ctx *context.Context
+ NICs []*NIC `json:"nics"`
+}
+
+// New returns a pointer to an Info struct that contains information about the
+// network interface controllers (NICs) on the host system
+func New(opts ...*option.Option) (*Info, error) {
+ ctx := context.New(opts...)
+ info := &Info{ctx: ctx}
+ if err := ctx.Do(info.load); err != nil {
+ return nil, err
+ }
+ return info, nil
+}
+
+func (i *Info) String() string {
+ return fmt.Sprintf(
+ "net (%d NICs)",
+ len(i.NICs),
+ )
+}
+
+// simple private struct used to encapsulate net information in a
+// top-level "network" YAML/JSON map/object key
+type netPrinter struct {
+ Info *Info `json:"network"`
+}
+
+// YAMLString returns a string with the net information formatted as YAML
+// under a top-level "network:" key
+func (i *Info) YAMLString() string {
+ return marshal.SafeYAML(i.ctx, netPrinter{i})
+}
+
+// JSONString returns a string with the net information formatted as JSON
+// under a top-level "network:" key
+func (i *Info) JSONString(indent bool) string {
+ return marshal.SafeJSON(i.ctx, netPrinter{i}, indent)
+}
diff --git a/vendor/github.com/jaypipes/ghw/pkg/net/net_linux.go b/vendor/github.com/jaypipes/ghw/pkg/net/net_linux.go
new file mode 100644
index 0000000000..1b338dfaf4
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/pkg/net/net_linux.go
@@ -0,0 +1,222 @@
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+//
+
+package net
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "strings"
+
+ "github.com/jaypipes/ghw/pkg/context"
+ "github.com/jaypipes/ghw/pkg/linuxpath"
+)
+
+const (
+ _WARN_ETHTOOL_NOT_INSTALLED = `ethtool not installed. Cannot grab NIC capabilities`
+)
+
+func (i *Info) load() error {
+ i.NICs = nics(i.ctx)
+ return nil
+}
+
+func nics(ctx *context.Context) []*NIC {
+ nics := make([]*NIC, 0)
+
+ paths := linuxpath.New(ctx)
+ files, err := ioutil.ReadDir(paths.SysClassNet)
+ if err != nil {
+ return nics
+ }
+
+ etAvailable := ctx.EnableTools
+ if etAvailable {
+ if etInstalled := ethtoolInstalled(); !etInstalled {
+ ctx.Warn(_WARN_ETHTOOL_NOT_INSTALLED)
+ etAvailable = false
+ }
+ }
+
+ for _, file := range files {
+ filename := file.Name()
+ // Ignore loopback...
+ if filename == "lo" {
+ continue
+ }
+
+ netPath := filepath.Join(paths.SysClassNet, filename)
+ dest, _ := os.Readlink(netPath)
+ isVirtual := false
+ if strings.Contains(dest, "devices/virtual/net") {
+ isVirtual = true
+ }
+
+ nic := &NIC{
+ Name: filename,
+ IsVirtual: isVirtual,
+ }
+
+ mac := netDeviceMacAddress(paths, filename)
+ nic.MacAddress = mac
+ if etAvailable {
+ nic.Capabilities = netDeviceCapabilities(ctx, filename)
+ } else {
+ nic.Capabilities = []*NICCapability{}
+ }
+
+ nic.PCIAddress = netDevicePCIAddress(paths.SysClassNet, filename)
+
+ nics = append(nics, nic)
+ }
+ return nics
+}
+
+func netDeviceMacAddress(paths *linuxpath.Paths, dev string) string {
+ // Instead of using udevadm, we can get the device's MAC address by examining
+ // the /sys/class/net/$DEVICE/address file in sysfs. However, for devices
+ // that have addr_assign_type != 0, return the empty string, since the MAC
+ // address is randomly assigned.
+ aatPath := filepath.Join(paths.SysClassNet, dev, "addr_assign_type")
+ contents, err := ioutil.ReadFile(aatPath)
+ if err != nil {
+ return ""
+ }
+ if strings.TrimSpace(string(contents)) != "0" {
+ return ""
+ }
+ addrPath := filepath.Join(paths.SysClassNet, dev, "address")
+ contents, err = ioutil.ReadFile(addrPath)
+ if err != nil {
+ return ""
+ }
+ return strings.TrimSpace(string(contents))
+}
+
+func ethtoolInstalled() bool {
+ _, err := exec.LookPath("ethtool")
+ return err == nil
+}
+
+func netDeviceCapabilities(ctx *context.Context, dev string) []*NICCapability {
+ caps := make([]*NICCapability, 0)
+ path, _ := exec.LookPath("ethtool")
+ cmd := exec.Command(path, "-k", dev)
+ var out bytes.Buffer
+ cmd.Stdout = &out
+ err := cmd.Run()
+ if err != nil {
+ msg := fmt.Sprintf("could not grab NIC capabilities for %s: %s", dev, err)
+ ctx.Warn(msg)
+ return caps
+ }
+
+ // The out variable will now contain something that looks like the
+ // following.
+ //
+ // Features for enp58s0f1:
+ // rx-checksumming: on
+ // tx-checksumming: off
+ // tx-checksum-ipv4: off
+ // tx-checksum-ip-generic: off [fixed]
+ // tx-checksum-ipv6: off
+ // tx-checksum-fcoe-crc: off [fixed]
+ // tx-checksum-sctp: off [fixed]
+ // scatter-gather: off
+ // tx-scatter-gather: off
+ // tx-scatter-gather-fraglist: off [fixed]
+ // tcp-segmentation-offload: off
+ // tx-tcp-segmentation: off
+ // tx-tcp-ecn-segmentation: off [fixed]
+ // tx-tcp-mangleid-segmentation: off
+ // tx-tcp6-segmentation: off
+ // < snipped >
+ scanner := bufio.NewScanner(&out)
+ // Skip the first line...
+ scanner.Scan()
+ for scanner.Scan() {
+ line := strings.TrimPrefix(scanner.Text(), "\t")
+ caps = append(caps, netParseEthtoolFeature(line))
+ }
+ return caps
+}
+
+// netParseEthtoolFeature parses a line from the ethtool -k output and returns
+// a NICCapability.
+//
+// The supplied line will look like the following:
+//
+// tx-checksum-ip-generic: off [fixed]
+//
+// [fixed] indicates that the feature may not be turned on/off. Note: it makes
+// no difference whether a privileged user runs `ethtool -k` when determining
+// whether [fixed] appears for a feature.
+func netParseEthtoolFeature(line string) *NICCapability {
+ parts := strings.Fields(line)
+ cap := strings.TrimSuffix(parts[0], ":")
+ enabled := parts[1] == "on"
+ fixed := len(parts) == 3 && parts[2] == "[fixed]"
+ return &NICCapability{
+ Name: cap,
+ IsEnabled: enabled,
+ CanEnable: !fixed,
+ }
+}
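+
+// Worked example (illustrative): "rx-checksumming: on" parses to
+// &NICCapability{Name: "rx-checksumming", IsEnabled: true, CanEnable: true},
+// while "tx-checksum-ip-generic: off [fixed]" parses to
+// &NICCapability{Name: "tx-checksum-ip-generic", IsEnabled: false, CanEnable: false}.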
+
+func netDevicePCIAddress(netDevDir, netDevName string) *string {
+ // What we do here is not that hard in the end: we need to navigate sysfs
+ // up to the directory belonging to the device backing the network interface.
+ // We could make a few relatively safe assumptions, but the safest way is to
+ // follow the right links. And so we go.
+ // First of all, knowing the network device name, we need to resolve the backing
+ // device path to its full sysfs path.
+ // Say we start with netDevDir="/sys/class/net" and netDevName="enp0s31f6"
+ netPath := filepath.Join(netDevDir, netDevName)
+ dest, err := os.Readlink(netPath)
+ if err != nil {
+ // bail out with empty value
+ return nil
+ }
+ // now we have something like dest="../../devices/pci0000:00/0000:00:1f.6/net/enp0s31f6"
+ // remember the path is relative to netDevDir="/sys/class/net"
+
+ netDev := filepath.Clean(filepath.Join(netDevDir, dest))
+ // so we clean "/sys/class/net/../../devices/pci0000:00/0000:00:1f.6/net/enp0s31f6"
+ // leading to "/sys/devices/pci0000:00/0000:00:1f.6/net/enp0s31f6"
+ // We're still not there: we need to access the data of the PCI device, so we
+ // jump into the path linked by the "device" pseudofile
+ dest, err = os.Readlink(filepath.Join(netDev, "device"))
+ if err != nil {
+ // bail out with empty value
+ return nil
+ }
+ // we expect something like "../../../0000:00:1f.6"
+
+ devPath := filepath.Clean(filepath.Join(netDev, dest))
+ // so we clean "/sys/devices/pci0000:00/0000:00:1f.6/net/enp0s31f6/../../../0000:00:1f.6"
+ // leading to "/sys/devices/pci0000:00/0000:00:1f.6/"
+ // finally here!
+
+ // which bus is this device connected to?
+ dest, err = os.Readlink(filepath.Join(devPath, "subsystem"))
+ if err != nil {
+ // bail out with empty value
+ return nil
+ }
+ // ok, this is hacky, but since we need the last *two* path components and we know we
+ // are running on linux...
+ if !strings.HasSuffix(dest, "/bus/pci") {
+ // unsupported and unexpected bus!
+ return nil
+ }
+
+ pciAddr := filepath.Base(devPath)
+ return &pciAddr
+}
diff --git a/vendor/github.com/jaypipes/ghw/pkg/net/net_stub.go b/vendor/github.com/jaypipes/ghw/pkg/net/net_stub.go
new file mode 100644
index 0000000000..c8dfa090d5
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/pkg/net/net_stub.go
@@ -0,0 +1,19 @@
+//go:build !linux && !windows
+// +build !linux,!windows
+
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+//
+
+package net
+
+import (
+ "runtime"
+
+ "github.com/pkg/errors"
+)
+
+func (i *Info) load() error {
+ return errors.New("netFillInfo not implemented on " + runtime.GOOS)
+}
diff --git a/vendor/github.com/jaypipes/ghw/pkg/net/net_windows.go b/vendor/github.com/jaypipes/ghw/pkg/net/net_windows.go
new file mode 100644
index 0000000000..0b46aa566e
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/pkg/net/net_windows.go
@@ -0,0 +1,74 @@
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+//
+
+package net
+
+import (
+ "strings"
+
+ "github.com/StackExchange/wmi"
+)
+
+const wqlNetworkAdapter = "SELECT Description, DeviceID, Index, InterfaceIndex, MACAddress, Manufacturer, Name, NetConnectionID, ProductName, ServiceName, PhysicalAdapter FROM Win32_NetworkAdapter"
+
+type win32NetworkAdapter struct {
+ Description *string
+ DeviceID *string
+ Index *uint32
+ InterfaceIndex *uint32
+ MACAddress *string
+ Manufacturer *string
+ Name *string
+ NetConnectionID *string
+ ProductName *string
+ ServiceName *string
+ PhysicalAdapter *bool
+}
+
+func (i *Info) load() error {
+ // Getting info from WMI
+ var win32NetDescriptions []win32NetworkAdapter
+ if err := wmi.Query(wqlNetworkAdapter, &win32NetDescriptions); err != nil {
+ return err
+ }
+
+ i.NICs = nics(win32NetDescriptions)
+ return nil
+}
+
+func nics(win32NetDescriptions []win32NetworkAdapter) []*NIC {
+ // Converting into standard structures
+ nics := make([]*NIC, 0)
+ for _, nicDescription := range win32NetDescriptions {
+ nic := &NIC{
+ Name: netDeviceName(nicDescription),
+ MacAddress: *nicDescription.MACAddress,
+ IsVirtual: netIsVirtual(nicDescription),
+ Capabilities: []*NICCapability{},
+ }
+ // Appending NIC to NICs
+ nics = append(nics, nic)
+ }
+
+ return nics
+}
+
+func netDeviceName(description win32NetworkAdapter) string {
+ var name string
+ if strings.TrimSpace(*description.NetConnectionID) != "" {
+ name = *description.NetConnectionID + " - " + *description.Description
+ } else {
+ name = *description.Description
+ }
+ return name
+}
+
+func netIsVirtual(description win32NetworkAdapter) bool {
+ if description.PhysicalAdapter == nil {
+ return false
+ }
+
+ return !(*description.PhysicalAdapter)
+}
diff --git a/vendor/github.com/jaypipes/ghw/pkg/option/option.go b/vendor/github.com/jaypipes/ghw/pkg/option/option.go
new file mode 100644
index 0000000000..6cd231de3d
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/pkg/option/option.go
@@ -0,0 +1,259 @@
+//
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+//
+
+package option
+
+import (
+ "io"
+ "io/ioutil"
+ "log"
+ "os"
+)
+
+const (
+ DefaultChroot = "/"
+)
+
+const (
+ envKeyChroot = "GHW_CHROOT"
+ envKeyDisableWarnings = "GHW_DISABLE_WARNINGS"
+ envKeyDisableTools = "GHW_DISABLE_TOOLS"
+ envKeySnapshotPath = "GHW_SNAPSHOT_PATH"
+ envKeySnapshotRoot = "GHW_SNAPSHOT_ROOT"
+ envKeySnapshotExclusive = "GHW_SNAPSHOT_EXCLUSIVE"
+ envKeySnapshotPreserve = "GHW_SNAPSHOT_PRESERVE"
+)
+
+// Alerter emits warnings about undesirable but recoverable errors.
+// We use a subset of a logger interface only to emit warnings, and
+// `Warninger` sounded ugly.
+type Alerter interface {
+ Printf(format string, v ...interface{})
+}
+
+var (
+ NullAlerter = log.New(ioutil.Discard, "", 0)
+)
+
+// EnvOrDefaultAlerter returns the default instance ghw will use to emit
+// its warnings. ghw will emit warnings to stderr by default unless the
+// environment variable GHW_DISABLE_WARNINGS is specified; in the latter case
+// all warnings will be suppressed.
+func EnvOrDefaultAlerter() Alerter {
+ var dest io.Writer
+ if _, exists := os.LookupEnv(envKeyDisableWarnings); exists {
+ dest = ioutil.Discard
+ } else {
+ // default
+ dest = os.Stderr
+ }
+ return log.New(dest, "", 0)
+}
+
+// EnvOrDefaultChroot returns the value of the GHW_CHROOT environment variable
+// or the default value of "/" if not set
+func EnvOrDefaultChroot() string {
+ // Grab options from the environs by default
+ if val, exists := os.LookupEnv(envKeyChroot); exists {
+ return val
+ }
+ return DefaultChroot
+}
+
+// EnvOrDefaultSnapshotPath returns the value of the GHW_SNAPSHOT_PATH environment
+// variable or the default value of "" (disable snapshot consumption) if not set
+func EnvOrDefaultSnapshotPath() string {
+ if val, exists := os.LookupEnv(envKeySnapshotPath); exists {
+ return val
+ }
+ return "" // default is no snapshot
+}
+
+// EnvOrDefaultSnapshotRoot returns the value of the GHW_SNAPSHOT_ROOT environment
+// variable or the default value of "" (self-manage the snapshot unpack directory, if relevant) if not set
+func EnvOrDefaultSnapshotRoot() string {
+ if val, exists := os.LookupEnv(envKeySnapshotRoot); exists {
+ return val
+ }
+ return "" // default is to self-manage the snapshot directory
+}
+
+// EnvOrDefaultSnapshotExclusive returns the value of the GHW_SNAPSHOT_EXCLUSIVE
+// environment variable or the default value of false if not set
+func EnvOrDefaultSnapshotExclusive() bool {
+ if _, exists := os.LookupEnv(envKeySnapshotExclusive); exists {
+ return true
+ }
+ return false
+}
+
+// EnvOrDefaultSnapshotPreserve returns the value of the GHW_SNAPSHOT_PRESERVE
+// environment variable or the default value of false if not set
+func EnvOrDefaultSnapshotPreserve() bool {
+ if _, exists := os.LookupEnv(envKeySnapshotPreserve); exists {
+ return true
+ }
+ return false
+}
+
+// EnvOrDefaultTools returns true if ghw should use external tools to augment the data
+// collected from sysfs. Most users want to do this most of the time, so this is enabled
+// by default. Users consuming snapshots may want to opt out; they can set the
+// GHW_DISABLE_TOOLS environment variable to any value to make ghw skip calling external
+// tools even if they are available.
+func EnvOrDefaultTools() bool {
+ if _, exists := os.LookupEnv(envKeyDisableTools); exists {
+ return false
+ }
+ return true
+}
+
+// Option is used to represent optionally-configured settings. Each field is a
+// pointer to some concrete value so that we can tell when something has been
+// set or left unset.
+type Option struct {
+ // To facilitate querying of sysfs filesystems that are bind-mounted to a
+ // non-default root mountpoint, we allow users to set the GHW_CHROOT environment
+ // variable to an alternate mountpoint. For instance, assume that the user of
+ // ghw is a Golang binary being executed from an application container that has
+ // certain host filesystems bind-mounted into the container at /host. The user
+ // would ensure the GHW_CHROOT environment variable is set to "/host" and ghw will
+ // build its paths from that location instead of /
+ Chroot *string
+
+ // Snapshot contains options for handling ghw snapshots
+ Snapshot *SnapshotOptions
+
+ // Alerter contains the target for ghw warnings
+ Alerter Alerter
+
+ // EnableTools optionally requests that ghw not call any external programs to
+ // learn about the hardware. The default is to use such tools if available.
+ EnableTools *bool
+
+ // PathOverrides optionally allows overriding the default paths ghw uses
+ // internally to learn about the system resources.
+ PathOverrides PathOverrides
+
+ // Context may contain a pointer to a `Context` struct that is constructed
+ // during a call to the `context.WithContext` function. Only used internally.
+ // This is an interface to get around recursive package import issues.
+ Context interface{}
+}
+
+// SnapshotOptions contains options for handling ghw snapshots
+type SnapshotOptions struct {
+ // Path allows users to specify a snapshot (captured using ghw-snapshot) to be
+ // automatically consumed. Users need to supply the path of the snapshot, and
+ // ghw will take care of unpacking it on a temporary directory.
+ // Set the environment variable "GHW_SNAPSHOT_PRESERVE" to make ghw skip the cleanup
+ // stage and keep the unpacked snapshot in the temporary directory.
+ Path string
+ // Root is the directory on which the snapshot must be unpacked. This allows
+ // the users to manage their snapshot directory instead of ghw doing that on
+ // their behalf. Relevant only if SnapshotPath is given.
+ Root *string
+ // Exclusive tells ghw whether the given directory should be considered for the
+ // exclusive use of ghw, if the user provides a Root. If the flag is set, ghw will
+ // unpack the snapshot in the given SnapshotRoot only if the directory is empty;
+ // otherwise any existing content will be left untouched and the unpack stage will
+ // exit silently. As an additional side effect, supplying both this option and
+ // SnapshotRoot makes each context try to unpack the snapshot only once.
+ Exclusive bool
+}
+
+// WithChroot allows overriding the root directory ghw uses.
+func WithChroot(dir string) *Option {
+ return &Option{Chroot: &dir}
+}
+
+// WithSnapshot sets snapshot-processing options for a ghw run
+func WithSnapshot(opts SnapshotOptions) *Option {
+ return &Option{
+ Snapshot: &opts,
+ }
+}
+
+// WithAlerter sets alerting options for ghw
+func WithAlerter(alerter Alerter) *Option {
+ return &Option{
+ Alerter: alerter,
+ }
+}
+
+// WithNullAlerter sets no-op alerting options for ghw
+func WithNullAlerter() *Option {
+ return &Option{
+ Alerter: NullAlerter,
+ }
+}
+
+// WithDisableTools prevents ghw from calling external tools to discover hardware capabilities.
+func WithDisableTools() *Option {
+ false_ := false
+ return &Option{EnableTools: &false_}
+}
+
+// PathOverrides is a map, keyed by the string name of a mount path, of override paths
+type PathOverrides map[string]string
+
+// WithPathOverrides supplies path-specific overrides for the context
+func WithPathOverrides(overrides PathOverrides) *Option {
+ return &Option{
+ PathOverrides: overrides,
+ }
+}
+
+// There is intentionally no Option related to GHW_SNAPSHOT_PRESERVE because we see that as
+// a debug/troubleshooting aid more than something users want to do regularly.
+// Hence we allow it only via the environment variable for the time being.
+
+// Merge accepts one or more Options and merges them together, returning the
+// merged Option
+func Merge(opts ...*Option) *Option {
+ merged := &Option{}
+ for _, opt := range opts {
+ if opt.Chroot != nil {
+ merged.Chroot = opt.Chroot
+ }
+ if opt.Snapshot != nil {
+ merged.Snapshot = opt.Snapshot
+ }
+ if opt.Alerter != nil {
+ merged.Alerter = opt.Alerter
+ }
+ if opt.EnableTools != nil {
+ merged.EnableTools = opt.EnableTools
+ }
+ // PathOverrides is intentionally settable only programmatically, not via the environment
+ if opt.PathOverrides != nil {
+ merged.PathOverrides = opt.PathOverrides
+ }
+ if opt.Context != nil {
+ merged.Context = opt.Context
+ }
+ }
+ // Set the default values for any options not supplied by the caller
+ if merged.Chroot == nil {
+ chroot := EnvOrDefaultChroot()
+ merged.Chroot = &chroot
+ }
+ if merged.Alerter == nil {
+ merged.Alerter = EnvOrDefaultAlerter()
+ }
+ if merged.Snapshot == nil {
+ snapRoot := EnvOrDefaultSnapshotRoot()
+ merged.Snapshot = &SnapshotOptions{
+ Path: EnvOrDefaultSnapshotPath(),
+ Root: &snapRoot,
+ Exclusive: EnvOrDefaultSnapshotExclusive(),
+ }
+ }
+ if merged.EnableTools == nil {
+ enabled := EnvOrDefaultTools()
+ merged.EnableTools = &enabled
+ }
+ return merged
+}
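+
+// Usage sketch (illustrative, not part of upstream ghw): later Options win for
+// any field they set, and unset fields fall back to the GHW_* environment
+// defaults resolved above:
+//
+//	merged := Merge(WithChroot("/host"), WithNullAlerter())
+//	// *merged.Chroot == "/host"; merged.Alerter == NullAlerter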
diff --git a/vendor/github.com/jaypipes/ghw/pkg/pci/address/address.go b/vendor/github.com/jaypipes/ghw/pkg/pci/address/address.go
new file mode 100644
index 0000000000..6a8a4e4575
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/pkg/pci/address/address.go
@@ -0,0 +1,55 @@
+//
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+//
+
+package address
+
+import (
+ "regexp"
+ "strings"
+)
+
+var (
+ regexAddress *regexp.Regexp = regexp.MustCompile(
+ `^(([0-9a-f]{0,4}):)?([0-9a-f]{2}):([0-9a-f]{2})\.([0-9a-f]{1})$`,
+ )
+)
+
+// Address contains the components of a PCI Address
+type Address struct {
+ Domain string
+ Bus string
+ Device string
+ Function string
+}
+
+// String() returns the canonical [D]BDF representation of this Address
+func (addr *Address) String() string {
+ return addr.Domain + ":" + addr.Bus + ":" + addr.Device + "." + addr.Function
+}
+
+// FromString returns an Address struct from an address string in either
+// $BUS:$DEVICE.$FUNCTION (BDF) format or in the full PCI address format that
+// includes the 4-digit $DOMAIN information as well:
+// $DOMAIN:$BUS:$DEVICE.$FUNCTION.
+//
+// Returns nil if the address string wasn't a valid PCI address.
+func FromString(address string) *Address {
+ addrLowered := strings.ToLower(address)
+ matches := regexAddress.FindStringSubmatch(addrLowered)
+ if len(matches) == 6 {
+ dom := "0000"
+ if matches[1] != "" {
+ dom = matches[2]
+ }
+ return &Address{
+ Domain: dom,
+ Bus: matches[3],
+ Device: matches[4],
+ Function: matches[5],
+ }
+ }
+ return nil
+}
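+
+// Worked example (illustrative): FromString("00:1f.6") fills in the default
+// domain and yields &Address{Domain: "0000", Bus: "00", Device: "1f", Function: "6"},
+// whose String() form is "0000:00:1f.6"; an input that doesn't match the
+// regex, such as "bogus", yields nil.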
diff --git a/vendor/github.com/jaypipes/ghw/pkg/pci/pci.go b/vendor/github.com/jaypipes/ghw/pkg/pci/pci.go
new file mode 100644
index 0000000000..86cc7b2522
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/pkg/pci/pci.go
@@ -0,0 +1,211 @@
+//
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+//
+
+package pci
+
+import (
+ "encoding/json"
+ "fmt"
+
+ "github.com/jaypipes/pcidb"
+
+ "github.com/jaypipes/ghw/pkg/context"
+ "github.com/jaypipes/ghw/pkg/marshal"
+ "github.com/jaypipes/ghw/pkg/option"
+ pciaddr "github.com/jaypipes/ghw/pkg/pci/address"
+ "github.com/jaypipes/ghw/pkg/topology"
+ "github.com/jaypipes/ghw/pkg/util"
+)
+
+// backward compatibility, to be removed in 1.0.0
+type Address pciaddr.Address
+
+// backward compatibility, to be removed in 1.0.0
+var AddressFromString = pciaddr.FromString
+
+type Device struct {
+ // The PCI address of the device
+ Address string `json:"address"`
+ Vendor *pcidb.Vendor `json:"vendor"`
+ Product *pcidb.Product `json:"product"`
+ Revision string `json:"revision"`
+ Subsystem *pcidb.Product `json:"subsystem"`
+ // optional subvendor/sub-device information
+ Class *pcidb.Class `json:"class"`
+ // optional sub-class for the device
+ Subclass *pcidb.Subclass `json:"subclass"`
+ // optional programming interface
+ ProgrammingInterface *pcidb.ProgrammingInterface `json:"programming_interface"`
+ // Topology node that the PCI device is affined to. Will be nil if the
+ // architecture is not NUMA.
+ Node *topology.Node `json:"node,omitempty"`
+ Driver string `json:"driver"`
+}
+
+type devIdent struct {
+ ID string `json:"id"`
+ Name string `json:"name"`
+}
+
+type devMarshallable struct {
+ Driver string `json:"driver"`
+ Address string `json:"address"`
+ Vendor devIdent `json:"vendor"`
+ Product devIdent `json:"product"`
+ Revision string `json:"revision"`
+ Subsystem devIdent `json:"subsystem"`
+ Class devIdent `json:"class"`
+ Subclass devIdent `json:"subclass"`
+ Interface devIdent `json:"programming_interface"`
+}
+
+// NOTE(jaypipes) Device has a custom JSON marshaller because we don't want
+// to serialize the entire PCIDB information for the Vendor (which includes all
+// of the vendor's products, etc). Instead, we simply serialize the ID and
+// human-readable name of the vendor, product, class, etc.
+func (d *Device) MarshalJSON() ([]byte, error) {
+ dm := devMarshallable{
+ Driver: d.Driver,
+ Address: d.Address,
+ Vendor: devIdent{
+ ID: d.Vendor.ID,
+ Name: d.Vendor.Name,
+ },
+ Product: devIdent{
+ ID: d.Product.ID,
+ Name: d.Product.Name,
+ },
+ Revision: d.Revision,
+ Subsystem: devIdent{
+ ID: d.Subsystem.ID,
+ Name: d.Subsystem.Name,
+ },
+ Class: devIdent{
+ ID: d.Class.ID,
+ Name: d.Class.Name,
+ },
+ Subclass: devIdent{
+ ID: d.Subclass.ID,
+ Name: d.Subclass.Name,
+ },
+ Interface: devIdent{
+ ID: d.ProgrammingInterface.ID,
+ Name: d.ProgrammingInterface.Name,
+ },
+ }
+ return json.Marshal(dm)
+}
+
+func (d *Device) String() string {
+ vendorName := util.UNKNOWN
+ if d.Vendor != nil {
+ vendorName = d.Vendor.Name
+ }
+ productName := util.UNKNOWN
+ if d.Product != nil {
+ productName = d.Product.Name
+ }
+ className := util.UNKNOWN
+ if d.Class != nil {
+ className = d.Class.Name
+ }
+ return fmt.Sprintf(
+ "%s -> driver: '%s' class: '%s' vendor: '%s' product: '%s'",
+ d.Address,
+ d.Driver,
+ className,
+ vendorName,
+ productName,
+ )
+}
+
+type Info struct {
+ arch topology.Architecture
+ ctx *context.Context
+ // All PCI devices on the host system
+ Devices []*Device
+ // hash of class ID -> class information
+ // DEPRECATED. Will be removed in v1.0. Please use
+ // github.com/jaypipes/pcidb to explore PCIDB information
+ Classes map[string]*pcidb.Class `json:"-"`
+ // hash of vendor ID -> vendor information
+ // DEPRECATED. Will be removed in v1.0. Please use
+ // github.com/jaypipes/pcidb to explore PCIDB information
+ Vendors map[string]*pcidb.Vendor `json:"-"`
+ // hash of vendor ID + product/device ID -> product information
+ // DEPRECATED. Will be removed in v1.0. Please use
+ // github.com/jaypipes/pcidb to explore PCIDB information
+ Products map[string]*pcidb.Product `json:"-"`
+}
+
+func (i *Info) String() string {
+ return fmt.Sprintf("PCI (%d devices)", len(i.Devices))
+}
+
+// New returns a pointer to an Info struct that contains information about the
+// PCI devices on the host system
+func New(opts ...*option.Option) (*Info, error) {
+ merged := option.Merge(opts...)
+ ctx := context.New(merged)
+ // by default we don't report NUMA information;
+ // we will do so only if we are sure we are running on a NUMA architecture
+ info := &Info{
+ arch: topology.ARCHITECTURE_SMP,
+ ctx: ctx,
+ }
+
+ // we do this trick because we need to make sure ctx.Setup() gets
+ // a chance to run before any subordinate package is created reusing
+ // our context.
+ loadDetectingTopology := func() error {
+ topo, err := topology.New(context.WithContext(ctx))
+ if err == nil {
+ info.arch = topo.Architecture
+ } else {
+ ctx.Warn("error detecting system topology: %v", err)
+ }
+ return info.load()
+ }
+
+ var err error
+ if context.Exists(merged) {
+ err = loadDetectingTopology()
+ } else {
+ err = ctx.Do(loadDetectingTopology)
+ }
+ if err != nil {
+ return nil, err
+ }
+ return info, nil
+}
+
+// lookupDevice gets a device from cached data
+func (info *Info) lookupDevice(address string) *Device {
+ for _, dev := range info.Devices {
+ if dev.Address == address {
+ return dev
+ }
+ }
+ return nil
+}
+
+// simple private struct used to encapsulate PCI information in a top-level
+// "pci" YAML/JSON map/object key
+type pciPrinter struct {
+ Info *Info `json:"pci"`
+}
+
+// YAMLString returns a string with the PCI information formatted as YAML
+// under a top-level "pci:" key
+func (i *Info) YAMLString() string {
+ return marshal.SafeYAML(i.ctx, pciPrinter{i})
+}
+
+// JSONString returns a string with the PCI information formatted as JSON
+// under a top-level "pci:" key
+func (i *Info) JSONString(indent bool) string {
+ return marshal.SafeJSON(i.ctx, pciPrinter{i}, indent)
+}
diff --git a/vendor/github.com/jaypipes/ghw/pkg/pci/pci_linux.go b/vendor/github.com/jaypipes/ghw/pkg/pci/pci_linux.go
new file mode 100644
index 0000000000..485ac9bb08
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/pkg/pci/pci_linux.go
@@ -0,0 +1,414 @@
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+//
+
+package pci
+
+import (
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "github.com/jaypipes/pcidb"
+
+ "github.com/jaypipes/ghw/pkg/context"
+ "github.com/jaypipes/ghw/pkg/linuxpath"
+ "github.com/jaypipes/ghw/pkg/option"
+ pciaddr "github.com/jaypipes/ghw/pkg/pci/address"
+ "github.com/jaypipes/ghw/pkg/topology"
+ "github.com/jaypipes/ghw/pkg/util"
+)
+
+const (
+ // found running `wc` against real linux systems
+ modAliasExpectedLength = 54
+)
+
+func (i *Info) load() error {
+ // When consuming snapshots - most notably, but not only, in tests -
+ // the context pkg forces the chroot value to the unpacked snapshot root.
+ // This is intentional and intentionally transparent, and ghw is prepared to
+ // handle this case. However, `pcidb` is not. It doesn't know about ghw
+ // snapshots, nor should it. So we need to complicate things a bit. If the
+ // user explicitly supplied a chroot option, then we should honor it all
+ // across the stack, and passing down the chroot to pcidb is the right thing
+ // to do. If, however, the chroot was implicitly set by snapshot support,
+ // then it must be consumed by ghw only. In this case we should NOT pass it
+ // down to pcidb.
+ chroot := i.ctx.Chroot
+ if i.ctx.SnapshotPath != "" {
+ chroot = option.DefaultChroot
+ }
+ db, err := pcidb.New(pcidb.WithChroot(chroot))
+ if err != nil {
+ return err
+ }
+ i.Classes = db.Classes
+ i.Vendors = db.Vendors
+ i.Products = db.Products
+ i.Devices = i.ListDevices()
+ return nil
+}
+
+func getDeviceModaliasPath(ctx *context.Context, pciAddr *pciaddr.Address) string {
+ paths := linuxpath.New(ctx)
+ return filepath.Join(
+ paths.SysBusPciDevices,
+ pciAddr.String(),
+ "modalias",
+ )
+}
+
+func getDeviceRevision(ctx *context.Context, pciAddr *pciaddr.Address) string {
+ paths := linuxpath.New(ctx)
+ revisionPath := filepath.Join(
+ paths.SysBusPciDevices,
+ pciAddr.String(),
+ "revision",
+ )
+
+ if _, err := os.Stat(revisionPath); err != nil {
+ return ""
+ }
+ revision, err := ioutil.ReadFile(revisionPath)
+ if err != nil {
+ return ""
+ }
+ return strings.TrimSpace(string(revision))
+}
+
+func getDeviceNUMANode(ctx *context.Context, pciAddr *pciaddr.Address) *topology.Node {
+ paths := linuxpath.New(ctx)
+ numaNodePath := filepath.Join(paths.SysBusPciDevices, pciAddr.String(), "numa_node")
+
+ if _, err := os.Stat(numaNodePath); err != nil {
+ return nil
+ }
+
+ nodeIdx := util.SafeIntFromFile(ctx, numaNodePath)
+ if nodeIdx == -1 {
+ return nil
+ }
+
+ return &topology.Node{
+ ID: nodeIdx,
+ }
+}
+
+func getDeviceDriver(ctx *context.Context, pciAddr *pciaddr.Address) string {
+ paths := linuxpath.New(ctx)
+ driverPath := filepath.Join(paths.SysBusPciDevices, pciAddr.String(), "driver")
+
+ if _, err := os.Stat(driverPath); err != nil {
+ return ""
+ }
+
+ dest, err := os.Readlink(driverPath)
+ if err != nil {
+ return ""
+ }
+ return filepath.Base(dest)
+}
+
+type deviceModaliasInfo struct {
+ vendorID string
+ productID string
+ subproductID string
+ subvendorID string
+ classID string
+ subclassID string
+ progIfaceID string
+}
+
+func parseModaliasFile(fp string) *deviceModaliasInfo {
+ if _, err := os.Stat(fp); err != nil {
+ return nil
+ }
+ data, err := ioutil.ReadFile(fp)
+ if err != nil {
+ return nil
+ }
+
+ return parseModaliasData(string(data))
+}
+
+func parseModaliasData(data string) *deviceModaliasInfo {
+ // Extra sanity check to avoid out-of-range panics. We actually expect
+ // the data to be exactly `modAliasExpectedLength` bytes long, but
+ // we will happily ignore any extra data we don't know how to
+ // handle.
+ if len(data) < modAliasExpectedLength {
+ return nil
+ }
+ // The modalias file is an encoded file that looks like this:
+ //
+ // $ cat /sys/devices/pci0000\:00/0000\:00\:03.0/0000\:03\:00.0/modalias
+ // pci:v000010DEd00001C82sv00001043sd00008613bc03sc00i00
+ //
+ // It is interpreted like so:
+ //
+ // pci: -- ignore
+ // v000010DE -- PCI vendor ID
+ // d00001C82 -- PCI device ID (the product/model ID)
+ // sv00001043 -- PCI subsystem vendor ID
+ // sd00008613 -- PCI subsystem device ID (subdevice product/model ID)
+ // bc03 -- PCI base class
+ // sc00 -- PCI subclass
+ // i00 -- programming interface
+ vendorID := strings.ToLower(data[9:13])
+ productID := strings.ToLower(data[18:22])
+ subvendorID := strings.ToLower(data[28:32])
+ subproductID := strings.ToLower(data[38:42])
+ classID := data[44:46]
+ subclassID := data[48:50]
+ progIfaceID := data[51:53]
+ return &deviceModaliasInfo{
+ vendorID: vendorID,
+ productID: productID,
+ subproductID: subproductID,
+ subvendorID: subvendorID,
+ classID: classID,
+ subclassID: subclassID,
+ progIfaceID: progIfaceID,
+ }
+}
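+
+// Worked example (illustrative): for the sample modalias content above,
+// "pci:v000010DEd00001C82sv00001043sd00008613bc03sc00i00" (54 bytes as read
+// from the file, including the trailing newline), the fixed-offset slicing
+// yields vendorID "10de", productID "1c82", subvendorID "1043", subproductID
+// "8613", classID "03", subclassID "00" and progIfaceID "00".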
+
+// Returns a pointer to a pcidb.Vendor struct matching the supplied vendor
+// ID string. If no such vendor ID string could be found, returns the
+// pcidb.Vendor struct populated with "unknown" vendor Name attribute and
+// empty Products attribute.
+func findPCIVendor(info *Info, vendorID string) *pcidb.Vendor {
+ vendor := info.Vendors[vendorID]
+ if vendor == nil {
+ return &pcidb.Vendor{
+ ID: vendorID,
+ Name: util.UNKNOWN,
+ Products: []*pcidb.Product{},
+ }
+ }
+ return vendor
+}
+
+// Returns a pointer to a pcidb.Product struct matching the supplied vendor
+// and product ID strings. If no such product could be found, returns the
+// pcidb.Product struct populated with "unknown" product Name attribute and
+// empty Subsystems attribute.
+func findPCIProduct(
+ info *Info,
+ vendorID string,
+ productID string,
+) *pcidb.Product {
+ product := info.Products[vendorID+productID]
+ if product == nil {
+ return &pcidb.Product{
+ ID: productID,
+ Name: util.UNKNOWN,
+ Subsystems: []*pcidb.Product{},
+ }
+ }
+ return product
+}
+
+// Returns a pointer to a pcidb.Product struct matching the supplied vendor,
+// product, subvendor and subproduct ID strings. If no such product could be
+// found, returns the pcidb.Product struct populated with "unknown" product
+// Name attribute and empty Subsystems attribute.
+func findPCISubsystem(
+ info *Info,
+ vendorID string,
+ productID string,
+ subvendorID string,
+ subproductID string,
+) *pcidb.Product {
+ product := info.Products[vendorID+productID]
+ subvendor := info.Vendors[subvendorID]
+ if subvendor != nil && product != nil {
+ for _, p := range product.Subsystems {
+ if p.ID == subproductID {
+ return p
+ }
+ }
+ }
+ return &pcidb.Product{
+ VendorID: subvendorID,
+ ID: subproductID,
+ Name: util.UNKNOWN,
+ }
+}
+
+// Returns a pointer to a pcidb.Class struct matching the supplied class ID
+// string. If no such class ID string could be found, returns the
+// pcidb.Class struct populated with "unknown" class Name attribute and
+// empty Subclasses attribute.
+func findPCIClass(info *Info, classID string) *pcidb.Class {
+ class := info.Classes[classID]
+ if class == nil {
+ return &pcidb.Class{
+ ID: classID,
+ Name: util.UNKNOWN,
+ Subclasses: []*pcidb.Subclass{},
+ }
+ }
+ return class
+}
+
+// Returns a pointer to a pcidb.Subclass struct matching the supplied class
+// and subclass ID strings. If no such subclass could be found, returns the
+// pcidb.Subclass struct populated with "unknown" subclass Name attribute
+// and empty ProgrammingInterfaces attribute.
+func findPCISubclass(
+ info *Info,
+ classID string,
+ subclassID string,
+) *pcidb.Subclass {
+ class := info.Classes[classID]
+ if class != nil {
+ for _, sc := range class.Subclasses {
+ if sc.ID == subclassID {
+ return sc
+ }
+ }
+ }
+ return &pcidb.Subclass{
+ ID: subclassID,
+ Name: util.UNKNOWN,
+ ProgrammingInterfaces: []*pcidb.ProgrammingInterface{},
+ }
+}
+
+// Returns a pointer to a pcidb.ProgrammingInterface struct matching the
+// supplied class, subclass and programming interface ID strings. If no such
+// programming interface could be found, returns the
+// pcidb.ProgrammingInterface struct populated with "unknown" Name attribute
+func findPCIProgrammingInterface(
+ info *Info,
+ classID string,
+ subclassID string,
+ progIfaceID string,
+) *pcidb.ProgrammingInterface {
+ subclass := findPCISubclass(info, classID, subclassID)
+ for _, pi := range subclass.ProgrammingInterfaces {
+ if pi.ID == progIfaceID {
+ return pi
+ }
+ }
+ return &pcidb.ProgrammingInterface{
+ ID: progIfaceID,
+ Name: util.UNKNOWN,
+ }
+}
+
+// GetDevice returns a pointer to a Device struct that describes the PCI
+// device at the requested address. If no such device could be found, returns nil.
+func (info *Info) GetDevice(address string) *Device {
+ // check cached data first
+ if dev := info.lookupDevice(address); dev != nil {
+ return dev
+ }
+
+ pciAddr := pciaddr.FromString(address)
+ if pciAddr == nil {
+ info.ctx.Warn("error parsing the pci address %q", address)
+ return nil
+ }
+
+ // no cached data, let's get the information from the system.
+ fp := getDeviceModaliasPath(info.ctx, pciAddr)
+ if fp == "" {
+ info.ctx.Warn("error finding modalias info for device %q", address)
+ return nil
+ }
+
+ modaliasInfo := parseModaliasFile(fp)
+ if modaliasInfo == nil {
+ info.ctx.Warn("error parsing modalias info for device %q", address)
+ return nil
+ }
+
+ device := info.getDeviceFromModaliasInfo(address, modaliasInfo)
+ device.Revision = getDeviceRevision(info.ctx, pciAddr)
+ if info.arch == topology.ARCHITECTURE_NUMA {
+ device.Node = getDeviceNUMANode(info.ctx, pciAddr)
+ }
+ device.Driver = getDeviceDriver(info.ctx, pciAddr)
+ return device
+}
+
+// ParseDevice returns a pointer to a Device given its describing data.
+// The PCI device obtained this way may not exist in the system;
+// use GetDevice to get a *Device which is found in the system
+func (info *Info) ParseDevice(address, modalias string) *Device {
+ modaliasInfo := parseModaliasData(modalias)
+ if modaliasInfo == nil {
+ return nil
+ }
+ return info.getDeviceFromModaliasInfo(address, modaliasInfo)
+}
+
+func (info *Info) getDeviceFromModaliasInfo(address string, modaliasInfo *deviceModaliasInfo) *Device {
+ vendor := findPCIVendor(info, modaliasInfo.vendorID)
+ product := findPCIProduct(
+ info,
+ modaliasInfo.vendorID,
+ modaliasInfo.productID,
+ )
+ subsystem := findPCISubsystem(
+ info,
+ modaliasInfo.vendorID,
+ modaliasInfo.productID,
+ modaliasInfo.subvendorID,
+ modaliasInfo.subproductID,
+ )
+ class := findPCIClass(info, modaliasInfo.classID)
+ subclass := findPCISubclass(
+ info,
+ modaliasInfo.classID,
+ modaliasInfo.subclassID,
+ )
+ progIface := findPCIProgrammingInterface(
+ info,
+ modaliasInfo.classID,
+ modaliasInfo.subclassID,
+ modaliasInfo.progIfaceID,
+ )
+
+ return &Device{
+ Address: address,
+ Vendor: vendor,
+ Subsystem: subsystem,
+ Product: product,
+ Class: class,
+ Subclass: subclass,
+ ProgrammingInterface: progIface,
+ }
+}
+
+// ListDevices returns a list of pointers to Device structs present on the
+// host system
+// DEPRECATED. Will be removed in v1.0. Please use
+// github.com/jaypipes/pcidb to explore PCIDB information
+func (info *Info) ListDevices() []*Device {
+ paths := linuxpath.New(info.ctx)
+ devs := make([]*Device, 0)
+ // We scan the /sys/bus/pci/devices directory which contains a collection
+ // of symlinks. The names of the symlinks are all the known PCI addresses
+ // for the host. For each address, we grab a *Device matching the
+ // address and append to the returned array.
+ links, err := ioutil.ReadDir(paths.SysBusPciDevices)
+ if err != nil {
+ info.ctx.Warn("failed to read /sys/bus/pci/devices")
+ return nil
+ }
+ var dev *Device
+ for _, link := range links {
+ addr := link.Name()
+ dev = info.GetDevice(addr)
+ if dev == nil {
+ info.ctx.Warn("failed to get device information for PCI address %s", addr)
+ } else {
+ devs = append(devs, dev)
+ }
+ }
+ return devs
+}
diff --git a/vendor/github.com/jaypipes/ghw/pkg/pci/pci_stub.go b/vendor/github.com/jaypipes/ghw/pkg/pci/pci_stub.go
new file mode 100644
index 0000000000..9ebb396d2d
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/pkg/pci/pci_stub.go
@@ -0,0 +1,32 @@
+//go:build !linux
+// +build !linux
+
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+//
+
+package pci
+
+import (
+ "runtime"
+
+ "github.com/pkg/errors"
+)
+
+func (i *Info) load() error {
+ return errors.New("pciFillInfo not implemented on " + runtime.GOOS)
+}
+
+// GetDevice returns a pointer to a Device struct that describes the PCI
+// device at the requested address. If no such device could be found, returns
+// nil
+func (info *Info) GetDevice(address string) *Device {
+ return nil
+}
+
+// ListDevices returns a list of pointers to Device structs present on the
+// host system
+func (info *Info) ListDevices() []*Device {
+ return nil
+}
diff --git a/vendor/github.com/jaypipes/ghw/pkg/product/product.go b/vendor/github.com/jaypipes/ghw/pkg/product/product.go
new file mode 100644
index 0000000000..83d6541d30
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/pkg/product/product.go
@@ -0,0 +1,96 @@
+//
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+//
+
+package product
+
+import (
+ "github.com/jaypipes/ghw/pkg/context"
+ "github.com/jaypipes/ghw/pkg/marshal"
+ "github.com/jaypipes/ghw/pkg/option"
+ "github.com/jaypipes/ghw/pkg/util"
+)
+
+// Info defines product information
+type Info struct {
+ ctx *context.Context
+ Family string `json:"family"`
+ Name string `json:"name"`
+ Vendor string `json:"vendor"`
+ SerialNumber string `json:"serial_number"`
+ UUID string `json:"uuid"`
+ SKU string `json:"sku"`
+ Version string `json:"version"`
+}
+
+func (i *Info) String() string {
+ familyStr := ""
+ if i.Family != "" {
+ familyStr = " family=" + i.Family
+ }
+ nameStr := ""
+ if i.Name != "" {
+ nameStr = " name=" + i.Name
+ }
+ vendorStr := ""
+ if i.Vendor != "" {
+ vendorStr = " vendor=" + i.Vendor
+ }
+ serialStr := ""
+ if i.SerialNumber != "" && i.SerialNumber != util.UNKNOWN {
+ serialStr = " serial=" + i.SerialNumber
+ }
+ uuidStr := ""
+ if i.UUID != "" && i.UUID != util.UNKNOWN {
+ uuidStr = " uuid=" + i.UUID
+ }
+ skuStr := ""
+ if i.SKU != "" {
+ skuStr = " sku=" + i.SKU
+ }
+ versionStr := ""
+ if i.Version != "" {
+ versionStr = " version=" + i.Version
+ }
+
+ return "product" + util.ConcatStrings(
+ familyStr,
+ nameStr,
+ vendorStr,
+ serialStr,
+ uuidStr,
+ skuStr,
+ versionStr,
+ )
+}
+
+// New returns a pointer to an Info struct containing information
+// about the host's product
+func New(opts ...*option.Option) (*Info, error) {
+ ctx := context.New(opts...)
+ info := &Info{ctx: ctx}
+ if err := ctx.Do(info.load); err != nil {
+ return nil, err
+ }
+ return info, nil
+}
+
+// simple private struct used to encapsulate product information in a top-level
+// "product" YAML/JSON map/object key
+type productPrinter struct {
+ Info *Info `json:"product"`
+}
+
+// YAMLString returns a string with the product information formatted as YAML
+// under a top-level "product:" key
+func (info *Info) YAMLString() string {
+ return marshal.SafeYAML(info.ctx, productPrinter{info})
+}
+
+// JSONString returns a string with the product information formatted as JSON
+// under a top-level "product:" key
+func (info *Info) JSONString(indent bool) string {
+ return marshal.SafeJSON(info.ctx, productPrinter{info}, indent)
+}
diff --git a/vendor/github.com/jaypipes/ghw/pkg/product/product_linux.go b/vendor/github.com/jaypipes/ghw/pkg/product/product_linux.go
new file mode 100644
index 0000000000..36b6b4471b
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/pkg/product/product_linux.go
@@ -0,0 +1,23 @@
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+//
+
+package product
+
+import (
+ "github.com/jaypipes/ghw/pkg/linuxdmi"
+)
+
+func (i *Info) load() error {
+
+ i.Family = linuxdmi.Item(i.ctx, "product_family")
+ i.Name = linuxdmi.Item(i.ctx, "product_name")
+ i.Vendor = linuxdmi.Item(i.ctx, "sys_vendor")
+ i.SerialNumber = linuxdmi.Item(i.ctx, "product_serial")
+ i.UUID = linuxdmi.Item(i.ctx, "product_uuid")
+ i.SKU = linuxdmi.Item(i.ctx, "product_sku")
+ i.Version = linuxdmi.Item(i.ctx, "product_version")
+
+ return nil
+}
diff --git a/vendor/github.com/jaypipes/ghw/pkg/product/product_stub.go b/vendor/github.com/jaypipes/ghw/pkg/product/product_stub.go
new file mode 100644
index 0000000000..8fc9724fbf
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/pkg/product/product_stub.go
@@ -0,0 +1,19 @@
+//go:build !linux && !windows
+// +build !linux,!windows
+
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+//
+
+package product
+
+import (
+ "runtime"
+
+ "github.com/pkg/errors"
+)
+
+func (i *Info) load() error {
+ return errors.New("productFillInfo not implemented on " + runtime.GOOS)
+}
diff --git a/vendor/github.com/jaypipes/ghw/pkg/product/product_windows.go b/vendor/github.com/jaypipes/ghw/pkg/product/product_windows.go
new file mode 100644
index 0000000000..c919cb0f6e
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/pkg/product/product_windows.go
@@ -0,0 +1,45 @@
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+//
+
+package product
+
+import (
+ "github.com/StackExchange/wmi"
+
+ "github.com/jaypipes/ghw/pkg/util"
+)
+
+const wqlProduct = "SELECT Caption, Description, IdentifyingNumber, Name, SKUNumber, Vendor, Version, UUID FROM Win32_ComputerSystemProduct"
+
+type win32Product struct {
+ Caption *string
+ Description *string
+ IdentifyingNumber *string
+ Name *string
+ SKUNumber *string
+ Vendor *string
+ Version *string
+ UUID *string
+}
+
+func (i *Info) load() error {
+ // Getting data from WMI
+ var win32ProductDescriptions []win32Product
+ // Assuming the first product is the host...
+ if err := wmi.Query(wqlProduct, &win32ProductDescriptions); err != nil {
+ return err
+ }
+ if len(win32ProductDescriptions) > 0 {
+ i.Family = util.UNKNOWN
+ i.Name = *win32ProductDescriptions[0].Name
+ i.Vendor = *win32ProductDescriptions[0].Vendor
+ i.SerialNumber = *win32ProductDescriptions[0].IdentifyingNumber
+ i.UUID = *win32ProductDescriptions[0].UUID
+ i.SKU = *win32ProductDescriptions[0].SKUNumber
+ i.Version = *win32ProductDescriptions[0].Version
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/jaypipes/ghw/pkg/snapshot/clonetree.go b/vendor/github.com/jaypipes/ghw/pkg/snapshot/clonetree.go
new file mode 100644
index 0000000000..519a874d9d
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/pkg/snapshot/clonetree.go
@@ -0,0 +1,199 @@
+//
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+//
+
+package snapshot
+
+import (
+ "errors"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "strings"
+)
+
+// Attempting to tar up pseudofiles like /proc/cpuinfo is an exercise in
+// futility. Notably, the pseudofiles, when read by syscalls, do not return the
+// number of bytes read. This causes the tar writer to write zero-length files.
+//
+// Instead, it is necessary to build a directory structure in a tmpdir and
+// create actual files with copies of the pseudofile contents
+
+// CloneTreeInto copies all the pseudofiles that ghw will consume into the root
+// `scratchDir`, preserving the hierarchy.
+func CloneTreeInto(scratchDir string) error {
+ err := setupScratchDir(scratchDir)
+ if err != nil {
+ return err
+ }
+ fileSpecs := ExpectedCloneContent()
+ return CopyFilesInto(fileSpecs, scratchDir, nil)
+}
+
+// ExpectedCloneContent returns a slice of glob patterns which represent the
+// pseudofiles ghw cares about.
+// The intended usage of this function is to validate a clone tree, checking that
+// the content matches the expectations.
+// Beware: the content is host-specific, because the content pertaining to some
+// subsystems, most notably PCI, is host-specific and unpredictable.
+func ExpectedCloneContent() []string {
+ fileSpecs := ExpectedCloneStaticContent()
+ fileSpecs = append(fileSpecs, ExpectedCloneNetContent()...)
+ fileSpecs = append(fileSpecs, ExpectedClonePCIContent()...)
+ fileSpecs = append(fileSpecs, ExpectedCloneGPUContent()...)
+ return fileSpecs
+}
+
+// ValidateClonedTree checks the content of a cloned tree, whose root is `clonedDir`,
+// against a slice of glob specs which must be included in the cloned tree.
+// It is not wrong, and this function doesn't forbid it, for the cloned tree to
+// include more files than necessary; ghw will just ignore the files it doesn't care about.
+// Returns a slice of glob patterns that were expected (given) but not found in the
+// cloned tree, and the error encountered during validation (if any).
+func ValidateClonedTree(fileSpecs []string, clonedDir string) ([]string, error) {
+ missing := []string{}
+ for _, fileSpec := range fileSpecs {
+ matches, err := filepath.Glob(filepath.Join(clonedDir, fileSpec))
+ if err != nil {
+ return missing, err
+ }
+ if len(matches) == 0 {
+ missing = append(missing, fileSpec)
+ }
+ }
+ return missing, nil
+}
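+
+// Usage sketch (illustrative, not part of upstream ghw): validating a freshly
+// cloned tree against the expected host-specific content; the directory below
+// is hypothetical:
+//
+//	missing, err := ValidateClonedTree(ExpectedCloneContent(), "/tmp/ghw-clone")
+//	if err == nil && len(missing) > 0 {
+//		trace("missing specs: %v\n", missing)
+//	}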
+
+// CopyFileOptions allows fine-tuning the behaviour of the CopyFilesInto function
+type CopyFileOptions struct {
+ // IsSymlinkFn allows controlling the behaviour when handling a symlink.
+ // If this hook returns true, the source file is treated as a symlink: the cloned
+ // tree will thus contain a symlink, with its path adjusted to match the relative
+ // path inside the cloned tree. If it returns false, the symlink will be dereferenced
+ // and its content copied.
+ // The easiest use case for this hook is if you want to avoid symlinks in your cloned
+ // tree (having duplicated content instead). In this case you can just supply a
+ // function which always returns false.
+ IsSymlinkFn func(path string, info os.FileInfo) bool
+ // ShouldCreateDirFn allows controlling whether empty directories listed as clone
+ // content should be created. When creating snapshots, empty directories
+ // are most often useless (but also harmless). Because of this, directories are only
+ // created as a side effect of copying the files inside them, and thus directories
+ // are never empty. The only notable exception is device drivers on Linux: in this
+ // case, for a number of technical/historical reasons, we care about the directory
+ // name, but not about the files inside it.
+ // Hence, this is the only case in which ghw clones empty directories.
+ ShouldCreateDirFn func(path string, info os.FileInfo) bool
+}
+
+// CopyFilesInto copies all the given glob files specs in the given `destDir` directory,
+// preserving the directory structure. This means you can provide a deeply nested filespec
+// like
+// - /some/deeply/nested/file*
+// and you DO NOT need to build the tree incrementally like
+// - /some/
+// - /some/deeply/
+// ...
+// All glob patterns supported by `filepath.Glob` are supported.
+func CopyFilesInto(fileSpecs []string, destDir string, opts *CopyFileOptions) error {
+ if opts == nil {
+ opts = &CopyFileOptions{
+ IsSymlinkFn: isSymlink,
+ ShouldCreateDirFn: isDriversDir,
+ }
+ }
+ for _, fileSpec := range fileSpecs {
+ trace("copying spec: %q\n", fileSpec)
+ matches, err := filepath.Glob(fileSpec)
+ if err != nil {
+ return err
+ }
+ if err := copyFileTreeInto(matches, destDir, opts); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func copyFileTreeInto(paths []string, destDir string, opts *CopyFileOptions) error {
+ for _, path := range paths {
+ trace(" copying path: %q\n", path)
+ baseDir := filepath.Dir(path)
+ if err := os.MkdirAll(filepath.Join(destDir, baseDir), os.ModePerm); err != nil {
+ return err
+ }
+
+ fi, err := os.Lstat(path)
+ if err != nil {
+ return err
+ }
+ // directories must be listed explicitly and created separately.
+ // In the future we may want to expose this decision as a hook point in
+ // CopyFileOptions, when clear use cases emerge.
+ destPath := filepath.Join(destDir, path)
+ if fi.IsDir() {
+ if opts.ShouldCreateDirFn(path, fi) {
+ if err := os.MkdirAll(destPath, os.ModePerm); err != nil {
+ return err
+ }
+ } else {
+ trace("expanded glob path %q is a directory - skipped\n", path)
+ }
+ continue
+ }
+ if opts.IsSymlinkFn(path, fi) {
+ trace(" copying link: %q -> %q\n", path, destPath)
+ if err := copyLink(path, destPath); err != nil {
+ return err
+ }
+ } else {
+ trace(" copying file: %q -> %q\n", path, destPath)
+ if err := copyPseudoFile(path, destPath); err != nil && !errors.Is(err, os.ErrPermission) {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func isSymlink(path string, fi os.FileInfo) bool {
+ return fi.Mode()&os.ModeSymlink != 0
+}
+
+func isDriversDir(path string, fi os.FileInfo) bool {
+ return strings.Contains(path, "drivers")
+}
+
+func copyLink(path, targetPath string) error {
+ target, err := os.Readlink(path)
+ if err != nil {
+ return err
+ }
+ trace(" symlink %q -> %q\n", target, targetPath)
+ if err := os.Symlink(target, targetPath); err != nil {
+ if errors.Is(err, os.ErrExist) {
+ return nil
+ }
+ return err
+ }
+
+ return nil
+}
+
+func copyPseudoFile(path, targetPath string) error {
+ buf, err := ioutil.ReadFile(path)
+ if err != nil {
+ return err
+ }
+ trace("creating %s\n", targetPath)
+ f, err := os.Create(targetPath)
+ if err != nil {
+ return err
+ }
+ // make sure the file is closed even if the write fails
+ defer f.Close()
+ if _, err = f.Write(buf); err != nil {
+ return err
+ }
+ return nil
+}
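
Taken together, these helpers support a clone-then-validate flow. A minimal
sketch of how a consumer might drive them (hypothetical scratch directory;
error handling reduced to printing):

    package main

    import (
        "fmt"

        "github.com/jaypipes/ghw/pkg/snapshot"
    )

    func main() {
        scratchDir := "/tmp/ghw-clone" // hypothetical destination
        if err := snapshot.CloneTreeInto(scratchDir); err != nil {
            fmt.Println("clone failed:", err)
            return
        }
        // Check that every glob pattern ghw expects matched at least one file.
        missing, err := snapshot.ValidateClonedTree(snapshot.ExpectedCloneContent(), scratchDir)
        if err != nil {
            fmt.Println("validation failed:", err)
            return
        }
        for _, spec := range missing {
            fmt.Println("missing from clone:", spec)
        }
    }
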
diff --git a/vendor/github.com/jaypipes/ghw/pkg/snapshot/clonetree_block_linux.go b/vendor/github.com/jaypipes/ghw/pkg/snapshot/clonetree_block_linux.go
new file mode 100644
index 0000000000..18e2161a4e
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/pkg/snapshot/clonetree_block_linux.go
@@ -0,0 +1,221 @@
+//
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+//
+
+package snapshot
+
+import (
+ "errors"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "strings"
+)
+
+func createBlockDevices(buildDir string) error {
+ // Grab all the block device pseudo-directories from /sys/block symlinks
+ // (excluding loopback devices) and inject them into our build filesystem
+ // with all but the circular symlink'd subsystem directories
+ devLinks, err := ioutil.ReadDir("/sys/block")
+ if err != nil {
+ return err
+ }
+ for _, devLink := range devLinks {
+ dname := devLink.Name()
+ if strings.HasPrefix(dname, "loop") {
+ continue
+ }
+ devPath := filepath.Join("/sys/block", dname)
+ trace("processing block device %q\n", devPath)
+
+ // from the sysfs layout, we know this is always a symlink
+ linkContentPath, err := os.Readlink(devPath)
+ if err != nil {
+ return err
+ }
+ trace("link target for block device %q is %q\n", devPath, linkContentPath)
+
+ // Create a symlink in our build filesystem pointing to the actual
+ // device bus path where the block device's information directory
+ // resides
+ linkPath := filepath.Join(buildDir, "sys/block", dname)
+ linkTargetPath := filepath.Join(
+ buildDir,
+ "sys/block",
+ strings.TrimPrefix(linkContentPath, string(os.PathSeparator)),
+ )
+ trace("creating device directory %s\n", linkTargetPath)
+ if err = os.MkdirAll(linkTargetPath, os.ModePerm); err != nil {
+ return err
+ }
+
+ trace("linking device directory %s to %s\n", linkPath, linkContentPath)
+ // Make sure the link target is a relative path!
+ // If we used an absolute path, the link target would be an absolute path
+ // starting with buildDir, so the snapshot would contain a broken link:
+ // the directory the snapshot is unpacked into will never have the same
+ // prefix as buildDir.
+ if err = os.Symlink(linkContentPath, linkPath); err != nil {
+ return err
+ }
+ // Now read the source block device directory and populate the
+ // newly-created target link in the build directory with the
+ // appropriate block device pseudofiles
+ srcDeviceDir := filepath.Join(
+ "/sys/block",
+ strings.TrimPrefix(linkContentPath, string(os.PathSeparator)),
+ )
+ trace("creating device directory %q from %q\n", linkTargetPath, srcDeviceDir)
+ if err = createBlockDeviceDir(linkTargetPath, srcDeviceDir); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func createBlockDeviceDir(buildDeviceDir string, srcDeviceDir string) error {
+ // Populate the supplied directory (in our build filesystem) with all the
+ // appropriate information pseudofile contents for the block device.
+ devName := filepath.Base(srcDeviceDir)
+ devFiles, err := ioutil.ReadDir(srcDeviceDir)
+ if err != nil {
+ return err
+ }
+ for _, f := range devFiles {
+ fname := f.Name()
+ fp := filepath.Join(srcDeviceDir, fname)
+ fi, err := os.Lstat(fp)
+ if err != nil {
+ return err
+ }
+ if fi.Mode()&os.ModeSymlink != 0 {
+ // Ignore any symlinks in the deviceDir since they simply point to
+ // either self-referential links or information we aren't
+ // interested in like "subsystem"
+ continue
+ } else if fi.IsDir() {
+ if strings.HasPrefix(fname, devName) {
+ // We're interested in the directories that begin with the
+ // block device name. These are directories with information
+ // about the partitions on the device
+ buildPartitionDir := filepath.Join(
+ buildDeviceDir, fname,
+ )
+ srcPartitionDir := filepath.Join(
+ srcDeviceDir, fname,
+ )
+ trace("creating partition directory %s\n", buildPartitionDir)
+ err = os.MkdirAll(buildPartitionDir, os.ModePerm)
+ if err != nil {
+ return err
+ }
+ err = createPartitionDir(buildPartitionDir, srcPartitionDir)
+ if err != nil {
+ return err
+ }
+ }
+ } else if fi.Mode().IsRegular() {
+ // Regular files in the block device directory are pseudofiles
+ // containing information such as the size (in sectors)
+ // and whether the device is read-only
+ buf, err := ioutil.ReadFile(fp)
+ if err != nil {
+ if errors.Is(err, os.ErrPermission) {
+ // example: /sys/devices/virtual/block/zram0/compact is 0400
+ trace("permission denied reading %q - skipped\n", fp)
+ continue
+ }
+ return err
+ }
+ targetPath := filepath.Join(buildDeviceDir, fname)
+ trace("creating %s\n", targetPath)
+ f, err := os.Create(targetPath)
+ if err != nil {
+ return err
+ }
+ if _, err = f.Write(buf); err != nil {
+ return err
+ }
+ f.Close()
+ }
+ }
+ // There is a special file $DEVICE_DIR/queue/rotational that, for some hard
+ // drives, contains a 1 or 0 indicating whether the device is a spinning
+ // disk or not
+ srcQueueDir := filepath.Join(
+ srcDeviceDir,
+ "queue",
+ )
+ buildQueueDir := filepath.Join(
+ buildDeviceDir,
+ "queue",
+ )
+ err = os.MkdirAll(buildQueueDir, os.ModePerm)
+ if err != nil {
+ return err
+ }
+ fp := filepath.Join(srcQueueDir, "rotational")
+ buf, err := ioutil.ReadFile(fp)
+ if err != nil {
+ return err
+ }
+ targetPath := filepath.Join(buildQueueDir, "rotational")
+ trace("creating %s\n", targetPath)
+ f, err := os.Create(targetPath)
+ if err != nil {
+ return err
+ }
+ if _, err = f.Write(buf); err != nil {
+ return err
+ }
+ f.Close()
+
+ return nil
+}
+
+func createPartitionDir(buildPartitionDir string, srcPartitionDir string) error {
+ // Populate the supplied directory (in our build filesystem) with all the
+ // appropriate information pseudofile contents for the partition.
+ partFiles, err := ioutil.ReadDir(srcPartitionDir)
+ if err != nil {
+ return err
+ }
+ for _, f := range partFiles {
+ fname := f.Name()
+ fp := filepath.Join(srcPartitionDir, fname)
+ fi, err := os.Lstat(fp)
+ if err != nil {
+ return err
+ }
+ if fi.Mode()&os.ModeSymlink != 0 {
+ // Ignore any symlinks in the partition directory since they simply
+ // point to information we aren't interested in like "subsystem"
+ continue
+ } else if fi.IsDir() {
+ // The subdirectories in the partition directory are not
+ // interesting for us. They have information about power events and
+ // traces
+ continue
+ } else if fi.Mode().IsRegular() {
+ // Regular files in the partition directory are pseudofiles
+ // containing information such as the size (in sectors)
+ // and whether the partition is read-only
+ buf, err := ioutil.ReadFile(fp)
+ if err != nil {
+ return err
+ }
+ targetPath := filepath.Join(buildPartitionDir, fname)
+ trace("creating %s\n", targetPath)
+ f, err := os.Create(targetPath)
+ if err != nil {
+ return err
+ }
+ if _, err = f.Write(buf); err != nil {
+ return err
+ }
+ f.Close()
+ }
+ }
+ return nil
+}
diff --git a/vendor/github.com/jaypipes/ghw/pkg/snapshot/clonetree_gpu_linux.go b/vendor/github.com/jaypipes/ghw/pkg/snapshot/clonetree_gpu_linux.go
new file mode 100644
index 0000000000..a26d6b01fb
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/pkg/snapshot/clonetree_gpu_linux.go
@@ -0,0 +1,33 @@
+//
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+//
+
+package snapshot
+
+import (
+ "strings"
+)
+
+// ExpectedCloneGPUContent returns a slice of strings pertaining to the GPU devices ghw
+// cares about. We cannot use a static list because we want to grab only the first cardX data
+// (see comment in pkg/gpu/gpu_linux.go)
+// Additionally, we want to make sure to clone the backing device data.
+func ExpectedCloneGPUContent() []string {
+ cardEntries := []string{
+ "device",
+ }
+
+ filterName := func(cardName string) bool {
+ if !strings.HasPrefix(cardName, "card") {
+ return false
+ }
+ if strings.ContainsRune(cardName, '-') {
+ return false
+ }
+ return true
+ }
+
+ return cloneContentByClass("drm", cardEntries, filterName, filterNone)
+}
diff --git a/vendor/github.com/jaypipes/ghw/pkg/snapshot/clonetree_linux.go b/vendor/github.com/jaypipes/ghw/pkg/snapshot/clonetree_linux.go
new file mode 100644
index 0000000000..0ccd69350d
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/pkg/snapshot/clonetree_linux.go
@@ -0,0 +1,109 @@
+//
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+//
+
+package snapshot
+
+import (
+ "io/ioutil"
+ "os"
+ "path/filepath"
+)
+
+func setupScratchDir(scratchDir string) error {
+ var createPaths = []string{
+ "sys/block",
+ }
+
+ for _, path := range createPaths {
+ if err := os.MkdirAll(filepath.Join(scratchDir, path), os.ModePerm); err != nil {
+ return err
+ }
+ }
+
+ return createBlockDevices(scratchDir)
+}
+
+// ExpectedCloneStaticContent returns a slice of glob patterns which represent the pseudofiles
+// ghw cares about, and which are independent of host-specific topology or configuration,
+// and thus are safely represented by a static slice - i.e. they don't need to be discovered at runtime.
+func ExpectedCloneStaticContent() []string {
+ return []string{
+ "/proc/cpuinfo",
+ "/proc/meminfo",
+ "/proc/self/mounts",
+ "/sys/devices/system/cpu/cpu*/cache/index*/*",
+ "/sys/devices/system/cpu/cpu*/topology/*",
+ "/sys/devices/system/memory/block_size_bytes",
+ "/sys/devices/system/memory/memory*/online",
+ "/sys/devices/system/memory/memory*/state",
+ "/sys/devices/system/node/has_*",
+ "/sys/devices/system/node/online",
+ "/sys/devices/system/node/possible",
+ "/sys/devices/system/node/node*/cpu*",
+ "/sys/devices/system/node/node*/distance",
+ "/sys/devices/system/node/node*/meminfo",
+ "/sys/devices/system/node/node*/memory*",
+ "/sys/devices/system/node/node*/hugepages/hugepages-*/*",
+ }
+}
+
+type filterFunc func(string) bool
+
+// cloneContentByClass copies all the content related to a given device class
+// (devClass), possibly filtering out devices whose name does NOT pass a
+// filter (filterName). Each entry in `/sys/class/$CLASS` is actually a
+// symbolic link. We can filter out entries depending on the link target.
+// Each filter is a simple function which takes the entry name or the link
+// target and must return true if the entry should be collected, false
+// otherwise. Lastly, a list of attributes is explicitly collected for each entry,
+// given as a list of glob patterns in `subEntries`.
+// Returns the final list of glob patterns to be collected.
+func cloneContentByClass(devClass string, subEntries []string, filterName filterFunc, filterLink filterFunc) []string {
+ var fileSpecs []string
+
+ // warning: don't use the context package here, this means not even the linuxpath package.
+ // TODO(fromani) remove the path duplication
+ sysClass := filepath.Join("sys", "class", devClass)
+ entries, err := ioutil.ReadDir(sysClass)
+ if err != nil {
+ // we should not import context, hence we can't Warn()
+ return fileSpecs
+ }
+ for _, entry := range entries {
+ devName := entry.Name()
+
+ if !filterName(devName) {
+ continue
+ }
+
+ devPath := filepath.Join(sysClass, devName)
+ dest, err := os.Readlink(devPath)
+ if err != nil {
+ continue
+ }
+
+ if !filterLink(dest) {
+ continue
+ }
+
+ // so, first copy the symlink itself
+ fileSpecs = append(fileSpecs, devPath)
+ // now we have to clone the content of the actual entry,
+ // which lives in (a subdirectory of) the backing hardware
+ // device
+ devData := filepath.Clean(filepath.Join(sysClass, dest))
+ for _, subEntry := range subEntries {
+ fileSpecs = append(fileSpecs, filepath.Join(devData, subEntry))
+ }
+ }
+
+ return fileSpecs
+}
+
+// filterNone allows all content, filtering out none of it
+func filterNone(_ string) bool {
+ return true
+}
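
As a package-internal illustration of cloneContentByClass, a new subsystem
could plug in its own filters. A hypothetical sketch (the "sound" class and
"id" attribute are made up for illustration; assumes a `strings` import is
added to this file):

    // expectedCloneSoundContent collects the "id" attribute of every
    // /sys/class/sound entry named like "cardN", following each class
    // symlink to the backing device.
    func expectedCloneSoundContent() []string {
        filterName := func(name string) bool {
            return strings.HasPrefix(name, "card")
        }
        return cloneContentByClass("sound", []string{"id"}, filterName, filterNone)
    }
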
diff --git a/vendor/github.com/jaypipes/ghw/pkg/snapshot/clonetree_net_linux.go b/vendor/github.com/jaypipes/ghw/pkg/snapshot/clonetree_net_linux.go
new file mode 100644
index 0000000000..27b27573fa
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/pkg/snapshot/clonetree_net_linux.go
@@ -0,0 +1,28 @@
+//
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+//
+
+package snapshot
+
+import (
+ "strings"
+)
+
+// ExpectedCloneNetContent returns a slice of strings pertaining to the network interfaces ghw
+// cares about. We cannot use a static list because we want to filter away the virtual devices,
+// which ghw doesn't concern itself with. So we need to do some runtime discovery.
+// Additionally, we want to make sure to clone the backing device data.
+func ExpectedCloneNetContent() []string {
+ ifaceEntries := []string{
+ "addr_assign_type",
+ // intentionally avoid cloning "address" so as not to leak any host-identifiable data.
+ }
+
+ filterLink := func(linkDest string) bool {
+ return !strings.Contains(linkDest, "devices/virtual/net")
+ }
+
+ return cloneContentByClass("net", ifaceEntries, filterNone, filterLink)
+}
diff --git a/vendor/github.com/jaypipes/ghw/pkg/snapshot/clonetree_pci_linux.go b/vendor/github.com/jaypipes/ghw/pkg/snapshot/clonetree_pci_linux.go
new file mode 100644
index 0000000000..dbc3fc83f5
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/pkg/snapshot/clonetree_pci_linux.go
@@ -0,0 +1,151 @@
+//
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+//
+
+package snapshot
+
+import (
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+
+ pciaddr "github.com/jaypipes/ghw/pkg/pci/address"
+)
+
+const (
+ // root directory: entry point to start scanning the PCI forest
+ // warning: don't use the context package here, this means not even the linuxpath package.
+ // TODO(fromani) remove the path duplication
+ sysBusPCIDir = "/sys/bus/pci/devices"
+)
+
+// ExpectedClonePCIContent returns a slice of glob patterns which represent the pseudofiles
+// ghw cares about, pertaining to PCI devices only.
+// Beware: the content is host-specific, because the PCI topology is host-dependent and unpredictable.
+func ExpectedClonePCIContent() []string {
+ fileSpecs := []string{
+ "/sys/bus/pci/drivers/*",
+ }
+ pciRoots := []string{
+ sysBusPCIDir,
+ }
+ for len(pciRoots) > 0 {
+ pciRoot := pciRoots[0]
+ pciRoots = pciRoots[1:]
+ specs, roots := scanPCIDeviceRoot(pciRoot)
+ pciRoots = append(pciRoots, roots...)
+ fileSpecs = append(fileSpecs, specs...)
+ }
+ return fileSpecs
+}
+
+// scanPCIDeviceRoot reports a slice of glob patterns which represent the pseudofiles
+// ghw cares about pertaining to all the PCI devices connected to the bus originating from the
+// given root; usually (but not always) a CPU package has 1+ PCI(e) roots, forming the first
+// level; more PCI bridges are (usually) attached to this level, creating deeply nested trees.
+// Hence we need to scan all possible roots, to make sure not to miss important devices.
+//
+// A note about error reporting: this function and its helper functions use trace() everywhere
+// to report recoverable errors, even though it would have been appropriate to use Warn().
+// This is unfortunate, and again a byproduct of the fact we cannot use context.Context to avoid
+// circular dependencies.
+// TODO(fromani): switch to Warn() as soon as we figure out how to break this circular dep.
+func scanPCIDeviceRoot(root string) (fileSpecs []string, pciRoots []string) {
+ trace("scanning PCI device root %q\n", root)
+
+ perDevEntries := []string{
+ "class",
+ "device",
+ "driver",
+ "irq",
+ "local_cpulist",
+ "modalias",
+ "numa_node",
+ "revision",
+ "vendor",
+ }
+ entries, err := ioutil.ReadDir(root)
+ if err != nil {
+ return []string{}, []string{}
+ }
+ for _, entry := range entries {
+ entryName := entry.Name()
+ if addr := pciaddr.FromString(entryName); addr == nil {
+ // doesn't look like an entry we care about.
+ // This is by far the most likely path,
+ // hence we should NOT trace/warn here.
+ continue
+ }
+
+ entryPath := filepath.Join(root, entryName)
+ pciEntry, err := findPCIEntryFromPath(root, entryName)
+ if err != nil {
+ trace("error scanning %q: %v", entryName, err)
+ continue
+ }
+
+ trace("PCI entry is %q\n", pciEntry)
+ fileSpecs = append(fileSpecs, entryPath)
+ for _, perDevEntry := range perDevEntries {
+ fileSpecs = append(fileSpecs, filepath.Join(pciEntry, perDevEntry))
+ }
+
+ if isPCIBridge(entryPath) {
+ trace("adding new PCI root %q\n", entryName)
+ pciRoots = append(pciRoots, pciEntry)
+ }
+ }
+ return fileSpecs, pciRoots
+}
+
+func findPCIEntryFromPath(root, entryName string) (string, error) {
+ entryPath := filepath.Join(root, entryName)
+ fi, err := os.Lstat(entryPath)
+ if err != nil {
+ return "", fmt.Errorf("stat(%s) failed: %v\n", entryPath, err)
+ }
+ if fi.Mode()&os.ModeSymlink == 0 {
+ // regular file, nothing to resolve
+ return entryPath, nil
+ }
+ // resolve symlink
+ target, err := os.Readlink(entryPath)
+ if err != nil {
+ return "", fmt.Errorf("readlink(%s) failed: %v", entryPath, err)
+ }
+ trace("entry %q is a symlink resolved to %q\n", entryPath, target)
+ return filepath.Clean(filepath.Join(root, target)), nil
+}
+
+func isPCIBridge(entryPath string) bool {
+ subNodes, err := ioutil.ReadDir(entryPath)
+ if err != nil {
+ // this is so unlikely we don't even return error. But we trace just in case.
+ trace("error scanning device entry path %q: %v", entryPath, err)
+ return false
+ }
+ for _, subNode := range subNodes {
+ if !subNode.IsDir() {
+ continue
+ }
+ if addr := pciaddr.FromString(subNode.Name()); addr != nil {
+ // we got an entry in the directory pertaining to this device
+ // which is a directory itself and it is named like a PCI address.
+ // Hence we infer the device we are considering is a PCI bridge of sorts.
+ // This is indeed a bit brutal, but the only possible alternative
+ // (besides blindly copying everything in /sys/bus/pci/devices) is
+ // to detect the type of the device and pick only the bridges.
+ // That approach duplicates the logic within the `pci` subpkg
+ // - or forces us into awkward dep cycles, and has poorer forward
+ // compatibility.
+ return true
+ }
+ }
+ return false
+}
diff --git a/vendor/github.com/jaypipes/ghw/pkg/snapshot/clonetree_stub.go b/vendor/github.com/jaypipes/ghw/pkg/snapshot/clonetree_stub.go
new file mode 100644
index 0000000000..af85a55b5d
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/pkg/snapshot/clonetree_stub.go
@@ -0,0 +1,30 @@
+//go:build !linux
+// +build !linux
+
+//
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+//
+
+package snapshot
+
+func setupScratchDir(scratchDir string) error {
+ return nil
+}
+
+func ExpectedCloneStaticContent() []string {
+ return []string{}
+}
+
+func ExpectedCloneGPUContent() []string {
+ return []string{}
+}
+
+func ExpectedCloneNetContent() []string {
+ return []string{}
+}
+
+func ExpectedClonePCIContent() []string {
+ return []string{}
+}
diff --git a/vendor/github.com/jaypipes/ghw/pkg/snapshot/pack.go b/vendor/github.com/jaypipes/ghw/pkg/snapshot/pack.go
new file mode 100644
index 0000000000..94b5bb6984
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/pkg/snapshot/pack.go
@@ -0,0 +1,113 @@
+//
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+//
+
+package snapshot
+
+import (
+ "archive/tar"
+ "compress/gzip"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+ "strings"
+)
+
+// PackFrom creates the snapshot named `snapshotName` from the
+// directory tree whose root is `sourceRoot`.
+func PackFrom(snapshotName, sourceRoot string) error {
+ f, err := OpenDestination(snapshotName)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+
+ return PackWithWriter(f, sourceRoot)
+}
+
+// OpenDestination opens the `snapshotName` file for writing, bailing out
+// if the file already exists and has content.
+// This is done to avoid accidental overwrites.
+func OpenDestination(snapshotName string) (*os.File, error) {
+ var f *os.File
+ var err error
+
+ if _, err = os.Stat(snapshotName); errors.Is(err, os.ErrNotExist) {
+ if f, err = os.Create(snapshotName); err != nil {
+ return nil, err
+ }
+ } else if err != nil {
+ return nil, err
+ } else {
+ // assign to the outer `f` (declared above) so it is actually returned
+ f, err = os.OpenFile(snapshotName, os.O_WRONLY, 0600)
+ if err != nil {
+ return nil, err
+ }
+ fs, err := f.Stat()
+ if err != nil {
+ return nil, err
+ }
+ if fs.Size() > 0 {
+ return nil, fmt.Errorf("File %s already exists and is of size >0", snapshotName)
+ }
+ }
+ return f, nil
+}
+
+// PackWithWriter creates a snapshot sending all the binary data to the
+// given `fw` writer. The snapshot is made from the directory tree whose
+// root is `sourceRoot`.
+func PackWithWriter(fw io.Writer, sourceRoot string) error {
+ gzw := gzip.NewWriter(fw)
+ defer gzw.Close()
+
+ tw := tar.NewWriter(gzw)
+ defer tw.Close()
+
+ return createSnapshot(tw, sourceRoot)
+}
+
+func createSnapshot(tw *tar.Writer, buildDir string) error {
+ return filepath.Walk(buildDir, func(path string, fi os.FileInfo, err error) error {
+ // propagate errors from Walk itself; fi may be nil in that case
+ if err != nil {
+ return err
+ }
+ if path == buildDir {
+ return nil
+ }
+ var link string
+
+ if fi.Mode()&os.ModeSymlink != 0 {
+ trace("processing symlink %s\n", path)
+ link, err = os.Readlink(path)
+ if err != nil {
+ return err
+ }
+ }
+
+ hdr, err := tar.FileInfoHeader(fi, link)
+ if err != nil {
+ return err
+ }
+ hdr.Name = strings.TrimPrefix(strings.TrimPrefix(path, buildDir), string(os.PathSeparator))
+
+ if err = tw.WriteHeader(hdr); err != nil {
+ return err
+ }
+
+ switch hdr.Typeflag {
+ case tar.TypeReg, tar.TypeRegA:
+ f, err := os.Open(path)
+ if err != nil {
+ return err
+ }
+ if _, err = io.Copy(tw, f); err != nil {
+ return err
+ }
+ f.Close()
+ }
+ return nil
+ })
+}
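
An end-to-end packing sketch (hypothetical paths): clone the host pseudofiles
into a scratch directory, then pack them into a gzipped tarball with PackFrom:

    package main

    import (
        "fmt"
        "io/ioutil"
        "os"

        "github.com/jaypipes/ghw/pkg/snapshot"
    )

    func main() {
        scratchDir, err := ioutil.TempDir("", "ghw-build-*")
        if err != nil {
            fmt.Println(err)
            return
        }
        defer os.RemoveAll(scratchDir)
        if err := snapshot.CloneTreeInto(scratchDir); err != nil {
            fmt.Println("clone failed:", err)
            return
        }
        // PackFrom (via OpenDestination) refuses to overwrite a non-empty file.
        if err := snapshot.PackFrom("/tmp/my-host.tar.gz", scratchDir); err != nil {
            fmt.Println("pack failed:", err)
        }
    }
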
diff --git a/vendor/github.com/jaypipes/ghw/pkg/snapshot/testdata.tar.gz b/vendor/github.com/jaypipes/ghw/pkg/snapshot/testdata.tar.gz
new file mode 100644
index 0000000000..edb26fbda3
Binary files /dev/null and b/vendor/github.com/jaypipes/ghw/pkg/snapshot/testdata.tar.gz differ
diff --git a/vendor/github.com/jaypipes/ghw/pkg/snapshot/trace.go b/vendor/github.com/jaypipes/ghw/pkg/snapshot/trace.go
new file mode 100644
index 0000000000..78c76121ae
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/pkg/snapshot/trace.go
@@ -0,0 +1,17 @@
+//
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+//
+
+package snapshot
+
+var trace func(msg string, args ...interface{})
+
+func init() {
+ trace = func(msg string, args ...interface{}) {}
+}
+
+// SetTraceFunction sets the function used by the snapshot package to emit
+// trace messages; by default, trace output is discarded
+func SetTraceFunction(fn func(msg string, args ...interface{})) {
+ trace = fn
+}
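
Trace output is discarded by default; a consumer can route it anywhere. A
small sketch forwarding it to the standard library logger:

    package main

    import (
        "log"

        "github.com/jaypipes/ghw/pkg/snapshot"
    )

    func main() {
        snapshot.SetTraceFunction(func(msg string, args ...interface{}) {
            log.Printf(msg, args...)
        })
        // ... snapshot operations performed from here on will be traced ...
    }
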
diff --git a/vendor/github.com/jaypipes/ghw/pkg/snapshot/unpack.go b/vendor/github.com/jaypipes/ghw/pkg/snapshot/unpack.go
new file mode 100644
index 0000000000..3df395e277
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/pkg/snapshot/unpack.go
@@ -0,0 +1,129 @@
+//
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+//
+
+package snapshot
+
+import (
+ "archive/tar"
+ "compress/gzip"
+ "io"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+
+ "github.com/jaypipes/ghw/pkg/option"
+)
+
+const (
+ TargetRoot = "ghw-snapshot-*"
+)
+
+const (
+ // If set, `ghw` will not unpack the snapshot in the user-supplied directory
+ // unless the aforementioned directory is empty.
+ OwnTargetDirectory = 1 << iota
+)
+
+// Cleanup removes the unpacked snapshot from the target root.
+// Please note that the environment variable `GHW_SNAPSHOT_PRESERVE`, if set,
+// will make this function silently skip the removal.
+func Cleanup(targetRoot string) error {
+ if option.EnvOrDefaultSnapshotPreserve() {
+ return nil
+ }
+ return os.RemoveAll(targetRoot)
+}
+
+// Unpack expands the given snapshot in a temporary directory managed by `ghw`. Returns the path of that directory.
+func Unpack(snapshotName string) (string, error) {
+ targetRoot, err := ioutil.TempDir("", TargetRoot)
+ if err != nil {
+ return "", err
+ }
+ _, err = UnpackInto(snapshotName, targetRoot, 0)
+ return targetRoot, err
+}
+
+// UnpackInto expands the given snapshot in a client-supplied directory.
+// Returns true if the snapshot was actually unpacked, false otherwise
+func UnpackInto(snapshotName, targetRoot string, flags uint) (bool, error) {
+ if (flags&OwnTargetDirectory) == OwnTargetDirectory && !isEmptyDir(targetRoot) {
+ return false, nil
+ }
+ snap, err := os.Open(snapshotName)
+ if err != nil {
+ return false, err
+ }
+ defer snap.Close()
+ return true, Untar(targetRoot, snap)
+}
+
+// Untar extracts data from the given reader (which must provide data in tar.gz format) and unpacks it into the given directory.
+func Untar(root string, r io.Reader) error {
+ var err error
+ gzr, err := gzip.NewReader(r)
+ if err != nil {
+ return err
+ }
+ defer gzr.Close()
+
+ tr := tar.NewReader(gzr)
+ for {
+ header, err := tr.Next()
+ if err == io.EOF {
+ // we are done
+ return nil
+ }
+
+ if err != nil {
+ // bail out
+ return err
+ }
+
+ if header == nil {
+ // TODO: how come?
+ continue
+ }
+
+ target := filepath.Join(root, header.Name)
+ mode := os.FileMode(header.Mode)
+
+ switch header.Typeflag {
+ case tar.TypeDir:
+ err = os.MkdirAll(target, mode)
+ if err != nil {
+ return err
+ }
+
+ case tar.TypeReg:
+ dst, err := os.OpenFile(target, os.O_CREATE|os.O_RDWR, mode)
+ if err != nil {
+ return err
+ }
+
+ _, err = io.Copy(dst, tr)
+ if err != nil {
+ return err
+ }
+
+ dst.Close()
+
+ case tar.TypeSymlink:
+ err = os.Symlink(header.Linkname, target)
+ if err != nil {
+ return err
+ }
+ }
+ }
+}
+
+func isEmptyDir(name string) bool {
+ entries, err := ioutil.ReadDir(name)
+ if err != nil {
+ return false
+ }
+ return len(entries) == 0
+}
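
On the consuming side, a snapshot is typically unpacked into a temporary root
and removed when done. A sketch (hypothetical snapshot path):

    package main

    import (
        "fmt"

        "github.com/jaypipes/ghw/pkg/snapshot"
    )

    func main() {
        root, err := snapshot.Unpack("/tmp/my-host.tar.gz") // hypothetical snapshot
        if err != nil {
            fmt.Println("unpack failed:", err)
            return
        }
        // Cleanup is a no-op if GHW_SNAPSHOT_PRESERVE is set in the environment.
        defer snapshot.Cleanup(root)

        fmt.Println("snapshot unpacked into", root)
        // ... point ghw at root (e.g. via its chroot option) and inspect it ...
    }
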
diff --git a/vendor/github.com/jaypipes/ghw/pkg/topology/topology.go b/vendor/github.com/jaypipes/ghw/pkg/topology/topology.go
new file mode 100644
index 0000000000..4a269bb9b9
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/pkg/topology/topology.go
@@ -0,0 +1,156 @@
+//
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+//
+
+package topology
+
+import (
+ "encoding/json"
+ "fmt"
+ "sort"
+ "strconv"
+ "strings"
+
+ "github.com/jaypipes/ghw/pkg/context"
+ "github.com/jaypipes/ghw/pkg/cpu"
+ "github.com/jaypipes/ghw/pkg/marshal"
+ "github.com/jaypipes/ghw/pkg/memory"
+ "github.com/jaypipes/ghw/pkg/option"
+)
+
+// Architecture describes the overall hardware architecture. It can be either
+// Symmetric Multi-Processor (SMP) or Non-Uniform Memory Access (NUMA)
+type Architecture int
+
+const (
+ // SMP is a Symmetric Multi-Processor system
+ ARCHITECTURE_SMP Architecture = iota
+ // NUMA is a Non-Uniform Memory Access system
+ ARCHITECTURE_NUMA
+)
+
+var (
+ architectureString = map[Architecture]string{
+ ARCHITECTURE_SMP: "SMP",
+ ARCHITECTURE_NUMA: "NUMA",
+ }
+
+ // NOTE(fromani): the keys are all lowercase and do not match
+ // the keys in the opposite table `architectureString`.
+ // This is done because of the choice we made in
+ // Architecture:MarshalJSON.
+ // We use this table only in UnmarshalJSON, so it should be OK.
+ stringArchitecture = map[string]Architecture{
+ "smp": ARCHITECTURE_SMP,
+ "numa": ARCHITECTURE_NUMA,
+ }
+)
+
+func (a Architecture) String() string {
+ return architectureString[a]
+}
+
+// NOTE(jaypipes): since serialized output is as "official" as we're going to
+// get, let's lowercase the string output when serializing, in order to
+// "normalize" the expected serialized output
+func (a Architecture) MarshalJSON() ([]byte, error) {
+ return []byte(strconv.Quote(strings.ToLower(a.String()))), nil
+}
+
+func (a *Architecture) UnmarshalJSON(b []byte) error {
+ var s string
+ if err := json.Unmarshal(b, &s); err != nil {
+ return err
+ }
+ key := strings.ToLower(s)
+ val, ok := stringArchitecture[key]
+ if !ok {
+ return fmt.Errorf("unknown architecture: %q", key)
+ }
+ *a = val
+ return nil
+}
+
+// Node is an abstract construct representing a collection of processors and
+// various levels of memory cache that those processors share. In a NUMA
+// architecture, there are multiple NUMA nodes, abstracted here as multiple
+// Node structs. In an SMP architecture, a single Node will be available in the
+// Info struct and this single struct can be used to describe the levels of
+// memory caching available to the single physical processor package's physical
+// processor cores
+type Node struct {
+ ID int `json:"id"`
+ Cores []*cpu.ProcessorCore `json:"cores"`
+ Caches []*memory.Cache `json:"caches"`
+ Distances []int `json:"distances"`
+ Memory *memory.Area `json:"memory"`
+}
+
+func (n *Node) String() string {
+ return fmt.Sprintf(
+ "node #%d (%d cores)",
+ n.ID,
+ len(n.Cores),
+ )
+}
+
+// Info describes the system topology for the host hardware
+type Info struct {
+ ctx *context.Context
+ Architecture Architecture `json:"architecture"`
+ Nodes []*Node `json:"nodes"`
+}
+
+// New returns a pointer to an Info struct that contains information about the
+// NUMA topology on the host system
+func New(opts ...*option.Option) (*Info, error) {
+ merged := option.Merge(opts...)
+ ctx := context.New(merged)
+ info := &Info{ctx: ctx}
+ var err error
+ if context.Exists(merged) {
+ err = info.load()
+ } else {
+ err = ctx.Do(info.load)
+ }
+ if err != nil {
+ return nil, err
+ }
+ for _, node := range info.Nodes {
+ sort.Sort(memory.SortByCacheLevelTypeFirstProcessor(node.Caches))
+ }
+ return info, nil
+}
+
+func (i *Info) String() string {
+ archStr := "SMP"
+ if i.Architecture == ARCHITECTURE_NUMA {
+ archStr = "NUMA"
+ }
+ res := fmt.Sprintf(
+ "topology %s (%d nodes)",
+ archStr,
+ len(i.Nodes),
+ )
+ return res
+}
+
+// simple private struct used to encapsulate topology information in a
+// top-level "topology" YAML/JSON map/object key
+type topologyPrinter struct {
+ Info *Info `json:"topology"`
+}
+
+// YAMLString returns a string with the topology information formatted as YAML
+// under a top-level "topology:" key
+func (i *Info) YAMLString() string {
+ return marshal.SafeYAML(i.ctx, topologyPrinter{i})
+}
+
+// JSONString returns a string with the topology information formatted as JSON
+// under a top-level "topology:" key
+func (i *Info) JSONString(indent bool) string {
+ return marshal.SafeJSON(i.ctx, topologyPrinter{i}, indent)
+}
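
A short usage sketch for the topology package: query the host topology and
print the per-node breakdown (output is host-dependent):

    package main

    import (
        "fmt"

        "github.com/jaypipes/ghw/pkg/topology"
    )

    func main() {
        info, err := topology.New()
        if err != nil {
            fmt.Println("topology query failed:", err)
            return
        }
        fmt.Println(info) // e.g. "topology NUMA (2 nodes)"
        for _, node := range info.Nodes {
            fmt.Println(" ", node) // e.g. "node #0 (12 cores)"
        }
    }
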
diff --git a/vendor/github.com/jaypipes/ghw/pkg/topology/topology_linux.go b/vendor/github.com/jaypipes/ghw/pkg/topology/topology_linux.go
new file mode 100644
index 0000000000..6844dd9687
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/pkg/topology/topology_linux.go
@@ -0,0 +1,107 @@
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+//
+
+package topology
+
+import (
+ "fmt"
+ "io/ioutil"
+ "path/filepath"
+ "strconv"
+ "strings"
+
+ "github.com/jaypipes/ghw/pkg/context"
+ "github.com/jaypipes/ghw/pkg/cpu"
+ "github.com/jaypipes/ghw/pkg/linuxpath"
+ "github.com/jaypipes/ghw/pkg/memory"
+)
+
+func (i *Info) load() error {
+ i.Nodes = topologyNodes(i.ctx)
+ if len(i.Nodes) == 1 {
+ i.Architecture = ARCHITECTURE_SMP
+ } else {
+ i.Architecture = ARCHITECTURE_NUMA
+ }
+ return nil
+}
+
+func topologyNodes(ctx *context.Context) []*Node {
+ paths := linuxpath.New(ctx)
+ nodes := make([]*Node, 0)
+
+ files, err := ioutil.ReadDir(paths.SysDevicesSystemNode)
+ if err != nil {
+ ctx.Warn("failed to determine nodes: %s\n", err)
+ return nodes
+ }
+ for _, file := range files {
+ filename := file.Name()
+ if !strings.HasPrefix(filename, "node") {
+ continue
+ }
+ node := &Node{}
+ nodeID, err := strconv.Atoi(filename[4:])
+ if err != nil {
+ ctx.Warn("failed to determine node ID: %s\n", err)
+ return nodes
+ }
+ node.ID = nodeID
+ cores, err := cpu.CoresForNode(ctx, nodeID)
+ if err != nil {
+ ctx.Warn("failed to determine cores for node: %s\n", err)
+ return nodes
+ }
+ node.Cores = cores
+ caches, err := memory.CachesForNode(ctx, nodeID)
+ if err != nil {
+ ctx.Warn("failed to determine caches for node: %s\n", err)
+ return nodes
+ }
+ node.Caches = caches
+
+ distances, err := distancesForNode(ctx, nodeID)
+ if err != nil {
+ ctx.Warn("failed to determine node distances for node: %s\n", err)
+ return nodes
+ }
+ node.Distances = distances
+
+ area, err := memory.AreaForNode(ctx, nodeID)
+ if err != nil {
+ ctx.Warn("failed to determine memory area for node: %s\n", err)
+ return nodes
+ }
+ node.Memory = area
+
+ nodes = append(nodes, node)
+ }
+ return nodes
+}
+
+func distancesForNode(ctx *context.Context, nodeID int) ([]int, error) {
+ paths := linuxpath.New(ctx)
+ path := filepath.Join(
+ paths.SysDevicesSystemNode,
+ fmt.Sprintf("node%d", nodeID),
+ "distance",
+ )
+
+ data, err := ioutil.ReadFile(path)
+ if err != nil {
+ return nil, err
+ }
+
+ items := strings.Fields(strings.TrimSpace(string(data)))
+ dists := make([]int, len(items)) // TODO: can a NUMA cell be offlined?
+ for idx, item := range items {
+ dist, err := strconv.Atoi(item)
+ if err != nil {
+ return dists, err
+ }
+ dists[idx] = dist
+ }
+ return dists, nil
+}
diff --git a/vendor/github.com/jaypipes/ghw/pkg/topology/topology_stub.go b/vendor/github.com/jaypipes/ghw/pkg/topology/topology_stub.go
new file mode 100644
index 0000000000..b5ee4354e0
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/pkg/topology/topology_stub.go
@@ -0,0 +1,19 @@
+//go:build !linux && !windows
+// +build !linux,!windows
+
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+//
+
+package topology
+
+import (
+ "runtime"
+
+ "github.com/pkg/errors"
+)
+
+func (i *Info) load() error {
+ return errors.New("topologyFillInfo not implemented on " + runtime.GOOS)
+}
diff --git a/vendor/github.com/jaypipes/ghw/pkg/topology/topology_windows.go b/vendor/github.com/jaypipes/ghw/pkg/topology/topology_windows.go
new file mode 100644
index 0000000000..3141ac99c9
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/pkg/topology/topology_windows.go
@@ -0,0 +1,156 @@
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+//
+
+package topology
+
+import (
+ "encoding/binary"
+ "fmt"
+ "syscall"
+ "unsafe"
+)
+
+const (
+ rcFailure = 0
+ sizeofLogicalProcessorInfo = 32
+ errInsufficientBuffer syscall.Errno = 122
+
+ relationProcessorCore = 0
+ relationNUMANode = 1
+ relationCache = 2
+ relationProcessorPackage = 3
+ relationGroup = 4
+)
+
+func (i *Info) load() error {
+ nodes, err := topologyNodes()
+ if err != nil {
+ return err
+ }
+ i.Nodes = nodes
+ if len(nodes) == 1 {
+ i.Architecture = ARCHITECTURE_SMP
+ } else {
+ i.Architecture = ARCHITECTURE_NUMA
+ }
+ return nil
+}
+
+func topologyNodes() ([]*Node, error) {
+ nodes := make([]*Node, 0)
+ lpis, err := getWin32LogicalProcessorInfos()
+ if err != nil {
+ return nil, err
+ }
+ for _, lpi := range lpis {
+ switch lpi.relationship {
+ case relationNUMANode:
+ nodes = append(nodes, &Node{
+ ID: lpi.numaNodeID(),
+ })
+ case relationProcessorCore:
+ // TODO(jaypipes): associated LP to processor core
+ case relationProcessorPackage:
+ // ignore
+ case relationCache:
+ // TODO(jaypipes) handle cache layers
+ default:
+ return nil, fmt.Errorf("Unknown LOGICAL_PROCESSOR_RELATIONSHIP value: %d", lpi.relationship)
+
+ }
+ }
+ return nodes, nil
+}
+
+// This is the CACHE_DESCRIPTOR struct in the Win32 API
+type cacheDescriptor struct {
+ level uint8
+ associativity uint8
+ lineSize uint16
+ size uint32
+ cacheType uint32
+}
+
+// This is the SYSTEM_LOGICAL_PROCESSOR_INFORMATION struct in the Win32 API
+type logicalProcessorInfo struct {
+ processorMask uint64
+ relationship uint64
+ // The following dummyunion member is a representation of this part of
+ // the SYSTEM_LOGICAL_PROCESSOR_INFORMATION struct:
+ //
+ // union {
+ // struct {
+ // BYTE Flags;
+ // } ProcessorCore;
+ // struct {
+ // DWORD NodeNumber;
+ // } NumaNode;
+ // CACHE_DESCRIPTOR Cache;
+ // ULONGLONG Reserved[2];
+ // } DUMMYUNIONNAME;
+ dummyunion [16]byte
+}
+
+// numaNodeID returns the NUMA node's identifier from the logical processor
+// information struct by grabbing the integer representation of the struct's
+// NumaNode unioned data element
+func (lpi *logicalProcessorInfo) numaNodeID() int {
+ if lpi.relationship != relationNUMANode {
+ return -1
+ }
+ return int(binary.LittleEndian.Uint16(lpi.dummyunion[0:]))
+}
+
+// ref: https://docs.microsoft.com/en-us/windows/win32/api/sysinfoapi/nf-sysinfoapi-getlogicalprocessorinformation
+func getWin32LogicalProcessorInfos() (
+ []*logicalProcessorInfo,
+ error,
+) {
+ lpis := make([]*logicalProcessorInfo, 0)
+ win32api := syscall.NewLazyDLL("kernel32.dll")
+ glpi := win32api.NewProc("GetLogicalProcessorInformation")
+
+ // The way the GetLogicalProcessorInformation (GLPI) Win32 API call
+ // works is wonky, but consistent with the Win32 API calling structure.
+ // Basically, you need to first call the GLPI API with a NULL pointer
+ // and a pointer to an integer. That first call to the API should
+ // return ERROR_INSUFFICIENT_BUFFER, which is the indication that the
+ // supplied buffer pointer is NULL and needs to have memory allocated to
+ // it of an amount equal to the value of the integer pointer argument.
+ // Once the buffer is allocated this amount of space, the GLPI API call
+ // is again called. This time, the return value should be 0 and the
+ // buffer will have been set to an array of
+ // SYSTEM_LOGICAL_PROCESSOR_INFORMATION structs.
+ toAllocate := uint32(0)
+ // first, figure out how much we need
+ rc, _, win32err := glpi.Call(uintptr(0), uintptr(unsafe.Pointer(&toAllocate)))
+ if rc == rcFailure {
+ if win32err != errInsufficientBuffer {
+ return nil, fmt.Errorf("GetLogicalProcessorInformation Win32 API initial call failed to return ERROR_INSUFFICIENT_BUFFER")
+ }
+ } else {
+ // This shouldn't happen because the buffer hasn't yet been allocated...
+ return nil, fmt.Errorf("GetLogicalProcessorInformation Win32 API initial call returned success instead of failure with ERROR_INSUFFICIENT_BUFFER")
+ }
+
+ // OK, now we actually allocate a raw buffer to fill with some number
+ // of SYSTEM_LOGICAL_PROCESSOR_INFORMATION structs
+ b := make([]byte, toAllocate)
+ rc, _, win32err = glpi.Call(uintptr(unsafe.Pointer(&b[0])), uintptr(unsafe.Pointer(&toAllocate)))
+ if rc == rcFailure {
+ return nil, fmt.Errorf("GetLogicalProcessorInformation Win32 API call failed to set supplied buffer. Win32 system error: %s", win32err)
+ }
+
+ for x := uint32(0); x < toAllocate; x += sizeofLogicalProcessorInfo {
+ lpiraw := b[x : x+sizeofLogicalProcessorInfo]
+ lpi := &logicalProcessorInfo{
+ processorMask: binary.LittleEndian.Uint64(lpiraw[0:]),
+ relationship: binary.LittleEndian.Uint64(lpiraw[8:]),
+ }
+ copy(lpi.dummyunion[0:16], lpiraw[16:32])
+ lpis = append(lpis, lpi)
+ }
+ return lpis, nil
+}
diff --git a/vendor/github.com/jaypipes/ghw/pkg/unitutil/unit.go b/vendor/github.com/jaypipes/ghw/pkg/unitutil/unit.go
new file mode 100644
index 0000000000..13fa7b5b48
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/pkg/unitutil/unit.go
@@ -0,0 +1,37 @@
+//
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+//
+
+package unitutil
+
+var (
+ KB int64 = 1024
+ MB = KB * 1024
+ GB = MB * 1024
+ TB = GB * 1024
+ PB = TB * 1024
+ EB = PB * 1024
+)
+
+// AmountString returns a divisor and a unit suffix appropriate for the given
+// size: (1024, "KB") for sizes under a mebibyte, (1048576, "MB") for sizes
+// under a gibibyte, and so on.
+//
+// For example, AmountString(1022) == (1024, "KB") and
+// AmountString(3*1048576) == (1048576, "MB").
+func AmountString(size int64) (int64, string) {
+ switch {
+ case size < MB:
+ return KB, "KB"
+ case size < GB:
+ return MB, "MB"
+ case size < TB:
+ return GB, "GB"
+ case size < PB:
+ return TB, "TB"
+ case size < EB:
+ return PB, "PB"
+ default:
+ return EB, "EB"
+ }
+}
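
Since the returned pair is a divisor plus suffix, a human-readable size is
produced by dividing first. A tiny sketch:

    package main

    import (
        "fmt"

        "github.com/jaypipes/ghw/pkg/unitutil"
    )

    func main() {
        size := int64(3 * 1024 * 1024)
        unit, suffix := unitutil.AmountString(size)
        fmt.Printf("%d%s\n", size/unit, suffix) // prints "3MB"
    }
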
diff --git a/vendor/github.com/jaypipes/ghw/pkg/util/util.go b/vendor/github.com/jaypipes/ghw/pkg/util/util.go
new file mode 100644
index 0000000000..b72430e2c3
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/pkg/util/util.go
@@ -0,0 +1,59 @@
+//
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+//
+
+package util
+
+import (
+ "fmt"
+ "io/ioutil"
+ "os"
+ "strconv"
+ "strings"
+
+ "github.com/jaypipes/ghw/pkg/context"
+)
+
+const (
+ UNKNOWN = "unknown"
+)
+
+type closer interface {
+ Close() error
+}
+
+func SafeClose(c closer) {
+ err := c.Close()
+ if err != nil {
+ _, _ = fmt.Fprintf(os.Stderr, "failed to close: %s", err)
+ }
+}
+
+// SafeIntFromFile reads a supplied filepath and converts the contents to an
+// integer. Returns -1 if there were file permission or existence errors, or
+// if the contents could not be successfully converted to an integer. On any
+// error, a warning message is emitted and -1 is returned.
+func SafeIntFromFile(ctx *context.Context, path string) int {
+ msg := "failed to read int from file: %s\n"
+ buf, err := ioutil.ReadFile(path)
+ if err != nil {
+ ctx.Warn(msg, err)
+ return -1
+ }
+ contents := strings.TrimSpace(string(buf))
+ res, err := strconv.Atoi(contents)
+ if err != nil {
+ ctx.Warn(msg, err)
+ return -1
+ }
+ return res
+}
+
+// ConcatStrings concatenates strings into a larger one. This function
+// addresses a very specific ghw use case. For a more general approach,
+// just use strings.Join()
+func ConcatStrings(items ...string) string {
+ return strings.Join(items, "")
+}
diff --git a/vendor/github.com/jaypipes/pcidb/.gitignore b/vendor/github.com/jaypipes/pcidb/.gitignore
new file mode 100644
index 0000000000..cc292d34bc
--- /dev/null
+++ b/vendor/github.com/jaypipes/pcidb/.gitignore
@@ -0,0 +1,2 @@
+vendor/
+coverage*.*
diff --git a/vendor/github.com/jaypipes/pcidb/COPYING b/vendor/github.com/jaypipes/pcidb/COPYING
new file mode 100644
index 0000000000..68c771a099
--- /dev/null
+++ b/vendor/github.com/jaypipes/pcidb/COPYING
@@ -0,0 +1,176 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
diff --git a/vendor/github.com/jaypipes/pcidb/LICENSE b/vendor/github.com/jaypipes/pcidb/LICENSE
new file mode 100644
index 0000000000..261eeb9e9f
--- /dev/null
+++ b/vendor/github.com/jaypipes/pcidb/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/jaypipes/pcidb/Makefile b/vendor/github.com/jaypipes/pcidb/Makefile
new file mode 100644
index 0000000000..73a274c79f
--- /dev/null
+++ b/vendor/github.com/jaypipes/pcidb/Makefile
@@ -0,0 +1,38 @@
+VENDOR := vendor
+PKGS := $(shell go list ./... | grep -v /$(VENDOR)/)
+SRC = $(shell find . -type f -name '*.go' -not -path "*/$(VENDOR)/*")
+BIN_DIR := $(GOPATH)/bin
+GOMETALINTER := $(BIN_DIR)/gometalinter
+
+.PHONY: test
+test: vet
+ go test $(PKGS)
+
+$(GOMETALINTER):
+ go get -u github.com/alecthomas/gometalinter
+ $(GOMETALINTER) --install &> /dev/null
+
+.PHONY: lint
+lint: $(GOMETALINTER)
+ $(GOMETALINTER) ./... --vendor
+
+.PHONY: fmt
+fmt:
+ @gofmt -s -l -w $(SRC)
+
+.PHONY: fmtcheck
+fmtcheck:
+ @bash -c "diff -u <(echo -n) <(gofmt -d $(SRC))"
+
+.PHONY: vet
+vet:
+ go vet $(PKGS)
+
+.PHONY: cover
+cover:
+ $(shell [ -e coverage.out ] && rm coverage.out)
+ @echo "mode: count" > coverage-all.out
+ @$(foreach pkg,$(PKGS),\
+ go test -coverprofile=coverage.out -covermode=count $(pkg);\
+ tail -n +2 coverage.out >> coverage-all.out;)
+ go tool cover -html=coverage-all.out -o=coverage-all.html
diff --git a/vendor/github.com/jaypipes/pcidb/README.md b/vendor/github.com/jaypipes/pcidb/README.md
new file mode 100644
index 0000000000..ddfcde6bf8
--- /dev/null
+++ b/vendor/github.com/jaypipes/pcidb/README.md
@@ -0,0 +1,417 @@
+# `pcidb` - the Golang PCI DB library
+
+[![Build Status](https://github.com/jaypipes/pcidb/actions/workflows/go.yml/badge.svg?branch=main)](https://github.com/jaypipes/pcidb/actions)
+[![Go Report Card](https://goreportcard.com/badge/github.com/jaypipes/pcidb)](https://goreportcard.com/report/github.com/jaypipes/pcidb)
+[![Contributor Covenant](https://img.shields.io/badge/Contributor%20Covenant-2.1-4baaaa.svg)](CODE_OF_CONDUCT.md)
+
+`pcidb` is a small Golang library for programmatic querying of PCI vendor,
+product and class information.
+
+We currently test `pcidb` on Linux, Windows and macOS.
+
+## Usage
+
+`pcidb` contains a PCI database inspection and querying facility that allows
+developers to query for information about hardware device classes, vendor and
+product information.
+
+The `pcidb.New()` function returns a pointer to a `pcidb.PCIDB` struct, or an
+error if the PCI database could not be loaded.
+
+> `pcidb`'s default behaviour is to first search for pci-ids DB files on the
+> local host system in well-known filesystem paths. If `pcidb` cannot find a
+> pci-ids DB file on the local host system, you can configure `pcidb` to fetch
+> a current pci-ids DB file from the network. You can enable this
+> network-fetching behaviour with the `pcidb.WithEnableNetworkFetch()` function
+> or by setting the `PCIDB_ENABLE_NETWORK_FETCH` environment variable to a
+> non-0 value.
+
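+For example, a minimal sketch (it assumes network access to the pci-ids
+mirror) that enables the network-fetch fallback:
+
+```go
+pci, err := pcidb.New(pcidb.WithEnableNetworkFetch())
+if err != nil {
+    fmt.Printf("Error getting PCI info: %v\n", err)
+    return
+}
+fmt.Printf("%d PCI vendors known\n", len(pci.Vendors))
+```
+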
+The `pcidb.PCIDB` struct contains a number of fields that may be queried for
+PCI information:
+
+* `pcidb.PCIDB.Classes` is a map, keyed by the PCI class ID (a hex-encoded
+ string) of pointers to `pcidb.Class` structs, one for each class of PCI
+ device known to `pcidb`
+* `pcidb.PCIDB.Vendors` is a map, keyed by the PCI vendor ID (a hex-encoded
+ string) of pointers to `pcidb.Vendor` structs, one for each PCI vendor
+ known to `pcidb`
+* `pcidb.PCIDB.Products` is a map, keyed by the PCI product ID (a hex-encoded
+ string) of pointers to `pcidb.Product` structs, one for each PCI product
+ known to `pcidb`
+
+**NOTE**: PCI products are often referred to by their "device ID". We use
+the term "product ID" in `pcidb` because it more accurately reflects what the
+identifier is for: a specific product line produced by the vendor.
+
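+As a quick lookup sketch (the `Products` map key is the vendor ID concatenated
+with the product ID; `8086`/`1237` below are illustrative IDs):
+
+```go
+pci, err := pcidb.New()
+if err != nil {
+    fmt.Printf("Error getting PCI info: %v\n", err)
+    return
+}
+// key format: vendor ID + product ID, e.g. "8086" + "1237"
+if product, ok := pci.Products["80861237"]; ok {
+    fmt.Printf("%s (vendor %s)\n", product.Name, product.VendorID)
+}
+```
+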
+### Overriding the root mountpoint `pcidb` uses
+
+The default root mountpoint that `pcidb` uses when looking for information
+about the host system is `/`. So, for example, when looking up known pci-ids DB
+files on Linux, `pcidb` will attempt to discover a pci-ids DB file at
+`/usr/share/misc/pci.ids`. If you are calling `pcidb` from a system that has an
+alternate root mountpoint, you can either set the `PCIDB_CHROOT` environment
+variable to that alternate path, or call the `pcidb.New()` function with the
+`pcidb.WithChroot()` modifier.
+
+For example, if you are executing from within an application container that has
+bind-mounted the root host filesystem to the mount point `/host`, you would set
+`PCIDB_CHROOT` to `/host` so that pcidb can find files like
+`/usr/share/misc/pci.ids` at `/host/usr/share/misc/pci.ids`.
+
+Alternately, you can use the `pcidb.WithChroot()` function like so:
+
+```go
+pci, err := pcidb.New(pcidb.WithChroot("/host"))
+```
+
+### PCI device classes
+
+Let's take a look at the PCI device class information and how to query the PCI
+database for class, subclass, and programming interface information.
+
+Each `pcidb.Class` struct contains the following fields:
+
+* `pcidb.Class.ID` is the hex-encoded string identifier for the device
+ class
+* `pcidb.Class.Name` is the common name/description of the class
+* `pcidb.Class.Subclasses` is an array of pointers to
+ `pcidb.Subclass` structs, one for each subclass in the device class
+
+Each `pcidb.Subclass` struct contains the following fields:
+
+* `pcidb.Subclass.ID` is the hex-encoded string identifier for the device
+ subclass
+* `pcidb.Subclass.Name` is the common name/description of the subclass
+* `pcidb.Subclass.ProgrammingInterfaces` is an array of pointers to
+ `pcidb.ProgrammingInterface` structs, one for each programming interface
+ for the device subclass
+
+Each `pcidb.ProgrammingInterface` struct contains the following fields:
+
+* `pcidb.ProgrammingInterface.ID` is the hex-encoded string identifier for
+ the programming interface
+* `pcidb.ProgrammingInterface.Name` is the common name/description for the
+ programming interface
+
+```go
+package main
+
+import (
+ "fmt"
+
+ "github.com/jaypipes/pcidb"
+)
+
+func main() {
+ pci, err := pcidb.New()
+ if err != nil {
+		fmt.Printf("Error getting PCI info: %v\n", err)
+		return
+	}
+
+ for _, devClass := range pci.Classes {
+ fmt.Printf(" Device class: %v ('%v')\n", devClass.Name, devClass.ID)
+ for _, devSubclass := range devClass.Subclasses {
+ fmt.Printf(" Device subclass: %v ('%v')\n", devSubclass.Name, devSubclass.ID)
+ for _, progIface := range devSubclass.ProgrammingInterfaces {
+ fmt.Printf(" Programming interface: %v ('%v')\n", progIface.Name, progIface.ID)
+ }
+ }
+ }
+}
+```
+
+Example output from my personal workstation, snipped for brevity:
+
+```
+...
+ Device class: Serial bus controller ('0c')
+ Device subclass: FireWire (IEEE 1394) ('00')
+ Programming interface: Generic ('00')
+ Programming interface: OHCI ('10')
+ Device subclass: ACCESS Bus ('01')
+ Device subclass: SSA ('02')
+ Device subclass: USB controller ('03')
+ Programming interface: UHCI ('00')
+ Programming interface: OHCI ('10')
+ Programming interface: EHCI ('20')
+ Programming interface: XHCI ('30')
+ Programming interface: Unspecified ('80')
+ Programming interface: USB Device ('fe')
+ Device subclass: Fibre Channel ('04')
+ Device subclass: SMBus ('05')
+ Device subclass: InfiniBand ('06')
+ Device subclass: IPMI SMIC interface ('07')
+ Device subclass: SERCOS interface ('08')
+ Device subclass: CANBUS ('09')
+...
+```
+
+### PCI vendors and products
+
+Let's take a look at the PCI vendor information and how to query the PCI
+database for vendor information and the products a vendor supplies.
+
+Each `pcidb.Vendor` struct contains the following fields:
+
+* `pcidb.Vendor.ID` is the hex-encoded string identifier for the vendor
+* `pcidb.Vendor.Name` is the common name/description of the vendor
+* `pcidb.Vendor.Products` is an array of pointers to `pcidb.Product`
+ structs, one for each product supplied by the vendor
+
+Each `pcidb.Product` struct contains the following fields:
+
+* `pcidb.Product.VendorID` is the hex-encoded string identifier for the
+ product's vendor
+* `pcidb.Product.ID` is the hex-encoded string identifier for the product
+* `pcidb.Product.Name` is the common name/description of the product
+* `pcidb.Product.Subsystems` is an array of pointers to
+ `pcidb.Product` structs, one for each "subsystem" (sometimes called
+ "sub-device" in PCI literature) for the product
+
+**NOTE**: A subsystem product may have a different vendor than its "parent" PCI
+product. This is sometimes referred to as the "sub-vendor".
+
+Here's some example code that demonstrates listing the PCI vendors with the
+most known products:
+
+```go
+package main
+
+import (
+ "fmt"
+ "sort"
+
+ "github.com/jaypipes/pcidb"
+)
+
+type ByCountProducts []*pcidb.Vendor
+
+func (v ByCountProducts) Len() int {
+ return len(v)
+}
+
+func (v ByCountProducts) Swap(i, j int) {
+ v[i], v[j] = v[j], v[i]
+}
+
+func (v ByCountProducts) Less(i, j int) bool {
+ return len(v[i].Products) > len(v[j].Products)
+}
+
+func main() {
+ pci, err := pcidb.New()
+ if err != nil {
+		fmt.Printf("Error getting PCI info: %v\n", err)
+		return
+	}
+
+ vendors := make([]*pcidb.Vendor, len(pci.Vendors))
+ x := 0
+ for _, vendor := range pci.Vendors {
+ vendors[x] = vendor
+ x++
+ }
+
+ sort.Sort(ByCountProducts(vendors))
+
+ fmt.Println("Top 5 vendors by product")
+ fmt.Println("====================================================")
+ for _, vendor := range vendors[0:5] {
+ fmt.Printf("%v ('%v') has %d products\n", vendor.Name, vendor.ID, len(vendor.Products))
+ }
+}
+```
+
+which yields (on my local workstation as of July 7th, 2018):
+
+```
+Top 5 vendors by product
+====================================================
+Intel Corporation ('8086') has 3389 products
+NVIDIA Corporation ('10de') has 1358 products
+Advanced Micro Devices, Inc. [AMD/ATI] ('1002') has 886 products
+National Instruments ('1093') has 601 products
+Chelsio Communications Inc ('1425') has 525 products
+```
+
+The following is an example of querying the PCI product and subsystem
+information to find the products with the greatest number of subsystems whose
+vendor differs from that of the top-level product. In other words, the two
+products that have been re-sold or re-manufactured by the largest number of
+different companies.
+
+```go
+package main
+
+import (
+ "fmt"
+ "sort"
+
+ "github.com/jaypipes/pcidb"
+)
+
+type ByCountSeparateSubvendors []*pcidb.Product
+
+func (v ByCountSeparateSubvendors) Len() int {
+ return len(v)
+}
+
+func (v ByCountSeparateSubvendors) Swap(i, j int) {
+ v[i], v[j] = v[j], v[i]
+}
+
+func (v ByCountSeparateSubvendors) Less(i, j int) bool {
+ iVendor := v[i].VendorID
+ iSetSubvendors := make(map[string]bool, 0)
+ iNumDiffSubvendors := 0
+ jVendor := v[j].VendorID
+ jSetSubvendors := make(map[string]bool, 0)
+ jNumDiffSubvendors := 0
+
+ for _, sub := range v[i].Subsystems {
+ if sub.VendorID != iVendor {
+ iSetSubvendors[sub.VendorID] = true
+ }
+ }
+ iNumDiffSubvendors = len(iSetSubvendors)
+
+ for _, sub := range v[j].Subsystems {
+ if sub.VendorID != jVendor {
+ jSetSubvendors[sub.VendorID] = true
+ }
+ }
+ jNumDiffSubvendors = len(jSetSubvendors)
+
+ return iNumDiffSubvendors > jNumDiffSubvendors
+}
+
+func main() {
+ pci, err := pcidb.New()
+ if err != nil {
+		fmt.Printf("Error getting PCI info: %v\n", err)
+		return
+	}
+
+ products := make([]*pcidb.Product, len(pci.Products))
+ x := 0
+ for _, product := range pci.Products {
+ products[x] = product
+ x++
+ }
+
+ sort.Sort(ByCountSeparateSubvendors(products))
+
+ fmt.Println("Top 2 products by # different subvendors")
+ fmt.Println("====================================================")
+ for _, product := range products[0:2] {
+ vendorID := product.VendorID
+ vendor := pci.Vendors[vendorID]
+ setSubvendors := make(map[string]bool, 0)
+
+ for _, sub := range product.Subsystems {
+ if sub.VendorID != vendorID {
+ setSubvendors[sub.VendorID] = true
+ }
+ }
+ fmt.Printf("%v ('%v') from %v\n", product.Name, product.ID, vendor.Name)
+ fmt.Printf(" -> %d subsystems under the following different vendors:\n", len(setSubvendors))
+		for subvendorID := range setSubvendors {
+ subvendor, exists := pci.Vendors[subvendorID]
+ subvendorName := "Unknown subvendor"
+ if exists {
+ subvendorName = subvendor.Name
+ }
+ fmt.Printf(" - %v ('%v')\n", subvendorName, subvendorID)
+ }
+ }
+}
+```
+
+which yields (on my local workstation as of July 7th, 2018):
+
+```
+Top 2 products by # different subvendors
+====================================================
+RTL-8100/8101L/8139 PCI Fast Ethernet Adapter ('8139') from Realtek Semiconductor Co., Ltd.
+ -> 34 subsystems under the following different vendors:
+ - OVISLINK Corp. ('149c')
+ - EPoX Computer Co., Ltd. ('1695')
+ - Red Hat, Inc ('1af4')
+ - Mitac ('1071')
+ - Netgear ('1385')
+ - Micro-Star International Co., Ltd. [MSI] ('1462')
+ - Hangzhou Silan Microelectronics Co., Ltd. ('1904')
+ - Compex ('11f6')
+ - Edimax Computer Co. ('1432')
+ - KYE Systems Corporation ('1489')
+ - ZyXEL Communications Corporation ('187e')
+ - Acer Incorporated [ALI] ('1025')
+ - Matsushita Electric Industrial Co., Ltd. ('10f7')
+ - Ruby Tech Corp. ('146c')
+ - Belkin ('1799')
+ - Allied Telesis ('1259')
+ - Unex Technology Corp. ('1429')
+ - CIS Technology Inc ('1436')
+ - D-Link System Inc ('1186')
+ - Ambicom Inc ('1395')
+ - AOPEN Inc. ('a0a0')
+ - TTTech Computertechnik AG (Wrong ID) ('0357')
+ - Gigabyte Technology Co., Ltd ('1458')
+ - Packard Bell B.V. ('1631')
+ - Billionton Systems Inc ('14cb')
+ - Kingston Technologies ('2646')
+ - Accton Technology Corporation ('1113')
+ - Samsung Electronics Co Ltd ('144d')
+ - Biostar Microtech Int'l Corp ('1565')
+ - U.S. Robotics ('16ec')
+ - KTI ('8e2e')
+ - Hewlett-Packard Company ('103c')
+ - ASUSTeK Computer Inc. ('1043')
+ - Surecom Technology ('10bd')
+Bt878 Video Capture ('036e') from Brooktree Corporation
+ -> 30 subsystems under the following different vendors:
+ - iTuner ('aa00')
+ - Nebula Electronics Ltd. ('0071')
+ - DViCO Corporation ('18ac')
+ - iTuner ('aa05')
+ - iTuner ('aa0d')
+ - LeadTek Research Inc. ('107d')
+ - Avermedia Technologies Inc ('1461')
+ - Chaintech Computer Co. Ltd ('270f')
+ - iTuner ('aa07')
+ - iTuner ('aa0a')
+ - Microtune, Inc. ('1851')
+ - iTuner ('aa01')
+ - iTuner ('aa04')
+ - iTuner ('aa06')
+ - iTuner ('aa0f')
+ - iTuner ('aa02')
+ - iTuner ('aa0b')
+ - Pinnacle Systems, Inc. (Wrong ID) ('bd11')
+ - Rockwell International ('127a')
+ - Askey Computer Corp. ('144f')
+ - Twinhan Technology Co. Ltd ('1822')
+ - Anritsu Corp. ('1852')
+ - iTuner ('aa08')
+ - Hauppauge computer works Inc. ('0070')
+ - Pinnacle Systems Inc. ('11bd')
+ - Conexant Systems, Inc. ('14f1')
+ - iTuner ('aa09')
+ - iTuner ('aa03')
+ - iTuner ('aa0c')
+ - iTuner ('aa0e')
+```
+
+## Developers
+
+Contributions to `pcidb` are welcomed! Fork the repo on GitHub and submit a pull
+request with your proposed changes. Or, feel free to log an issue for a feature
+request or bug report.
+
+### Running tests
+
+You can run unit tests easily using the `make test` command, like so:
+
+```
+[jaypipes@uberbox pcidb]$ make test
+go test github.com/jaypipes/pcidb
+ok github.com/jaypipes/pcidb 0.045s
+```
diff --git a/vendor/github.com/jaypipes/pcidb/context.go b/vendor/github.com/jaypipes/pcidb/context.go
new file mode 100644
index 0000000000..da34599653
--- /dev/null
+++ b/vendor/github.com/jaypipes/pcidb/context.go
@@ -0,0 +1,86 @@
+package pcidb
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+ "runtime"
+
+ homedir "github.com/mitchellh/go-homedir"
+)
+
+// Concrete merged set of configuration switches that get passed to pcidb
+// internal functions
+type context struct {
+ chroot string
+ cacheOnly bool
+ cachePath string
+ path string
+ enableNetworkFetch bool
+ searchPaths []string
+}
+
+func contextFromOptions(merged *WithOption) *context {
+ ctx := &context{
+ chroot: *merged.Chroot,
+ cacheOnly: *merged.CacheOnly,
+ cachePath: getCachePath(),
+ enableNetworkFetch: *merged.EnableNetworkFetch,
+ path: *merged.Path,
+ searchPaths: make([]string, 0),
+ }
+ ctx.setSearchPaths()
+ return ctx
+}
+
+func getCachePath() string {
+ hdir, err := homedir.Dir()
+ if err != nil {
+		fmt.Fprintf(os.Stderr, "Failed getting homedir.Dir(): %v\n", err)
+ return ""
+ }
+ fp, err := homedir.Expand(filepath.Join(hdir, ".cache", "pci.ids"))
+ if err != nil {
+		fmt.Fprintf(os.Stderr, "Failed expanding local cache path: %v\n", err)
+ return ""
+ }
+ return fp
+}
+
+// Depending on the operating system, sets the context's searchPaths to a set
+// of local filepaths to search for a pci.ids database file
+func (ctx *context) setSearchPaths() {
+ // Look in direct path first, if set
+ if ctx.path != "" {
+ ctx.searchPaths = append(ctx.searchPaths, ctx.path)
+ return
+ }
+ // A set of filepaths we will first try to search for the pci-ids DB file
+ // on the local machine. If we fail to find one, we'll try pulling the
+ // latest pci-ids file from the network
+ ctx.searchPaths = append(ctx.searchPaths, ctx.cachePath)
+ if ctx.cacheOnly {
+ return
+ }
+
+ rootPath := ctx.chroot
+
+ if runtime.GOOS != "windows" {
+ ctx.searchPaths = append(
+ ctx.searchPaths,
+ filepath.Join(rootPath, "usr", "share", "hwdata", "pci.ids"),
+ )
+ ctx.searchPaths = append(
+ ctx.searchPaths,
+ filepath.Join(rootPath, "usr", "share", "misc", "pci.ids"),
+ )
+ ctx.searchPaths = append(
+ ctx.searchPaths,
+ filepath.Join(rootPath, "usr", "share", "hwdata", "pci.ids.gz"),
+ )
+ ctx.searchPaths = append(
+ ctx.searchPaths,
+ filepath.Join(rootPath, "usr", "share", "misc", "pci.ids.gz"),
+ )
+ }
+}
diff --git a/vendor/github.com/jaypipes/pcidb/discover.go b/vendor/github.com/jaypipes/pcidb/discover.go
new file mode 100644
index 0000000000..b0452d7db6
--- /dev/null
+++ b/vendor/github.com/jaypipes/pcidb/discover.go
@@ -0,0 +1,111 @@
+//
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+//
+
+package pcidb
+
+import (
+ "bufio"
+ "compress/gzip"
+ "io"
+ "net/http"
+ "os"
+ "path/filepath"
+ "strings"
+)
+
+const (
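+	// PCIIDS_URI is the network location from which cacheDBFile fetches the
+	// gzipped pci.ids DB file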
+ PCIIDS_URI = "https://pci-ids.ucw.cz/v2.2/pci.ids.gz"
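+	// USER_AGENT is the User-Agent header value sent with that fetch request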
+ USER_AGENT = "golang-jaypipes-pcidb"
+)
+
+func (db *PCIDB) load(ctx *context) error {
+ var foundPath string
+ for _, fp := range ctx.searchPaths {
+ if _, err := os.Stat(fp); err == nil {
+ foundPath = fp
+ break
+ }
+ }
+ if foundPath == "" {
+ if !ctx.enableNetworkFetch {
+ return ERR_NO_DB
+ }
+ // OK, so we didn't find any host-local copy of the pci-ids DB file. Let's
+ // try fetching it from the network and storing it
+ if err := cacheDBFile(ctx.cachePath); err != nil {
+ return err
+ }
+ foundPath = ctx.cachePath
+ }
+ f, err := os.Open(foundPath)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+
+ var scanner *bufio.Scanner
+ if strings.HasSuffix(foundPath, ".gz") {
+ var zipReader *gzip.Reader
+ if zipReader, err = gzip.NewReader(f); err != nil {
+ return err
+ }
+ defer zipReader.Close()
+ scanner = bufio.NewScanner(zipReader)
+ } else {
+ scanner = bufio.NewScanner(f)
+ }
+
+ return parseDBFile(db, scanner)
+}
+
+func ensureDir(fp string) error {
+ fpDir := filepath.Dir(fp)
+ if _, err := os.Stat(fpDir); os.IsNotExist(err) {
+ err = os.MkdirAll(fpDir, os.ModePerm)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// Pulls down the latest copy of the pci-ids file from the network and stores
+// it in the local host filesystem
+func cacheDBFile(cacheFilePath string) error {
+	if err := ensureDir(cacheFilePath); err != nil {
+		return err
+	}
+
+ client := new(http.Client)
+ request, err := http.NewRequest("GET", PCIIDS_URI, nil)
+ if err != nil {
+ return err
+ }
+ request.Header.Set("User-Agent", USER_AGENT)
+ response, err := client.Do(request)
+ if err != nil {
+ return err
+ }
+ defer response.Body.Close()
+ f, err := os.Create(cacheFilePath)
+ if err != nil {
+ return err
+ }
+ defer func() {
+ if err != nil {
+ os.Remove(cacheFilePath)
+ }
+ }()
+ defer f.Close()
+ // write the gunzipped contents to our local cache file
+ zr, err := gzip.NewReader(response.Body)
+ if err != nil {
+ return err
+ }
+ defer zr.Close()
+ if _, err = io.Copy(f, zr); err != nil {
+ return err
+ }
+ return err
+}
diff --git a/vendor/github.com/jaypipes/pcidb/main.go b/vendor/github.com/jaypipes/pcidb/main.go
new file mode 100644
index 0000000000..d518748e71
--- /dev/null
+++ b/vendor/github.com/jaypipes/pcidb/main.go
@@ -0,0 +1,196 @@
+//
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+//
+
+package pcidb
+
+import (
+ "fmt"
+ "os"
+ "strconv"
+)
+
+var (
+ ERR_NO_DB = fmt.Errorf("No pci-ids DB files found (and network fetch disabled)")
+ trueVar = true
+)
+
+// ProgrammingInterface is the PCI programming interface for a class of PCI
+// devices
+type ProgrammingInterface struct {
+ // hex-encoded PCI_ID of the programming interface
+ ID string `json:"id"`
+ // common string name for the programming interface
+ Name string `json:"name"`
+}
+
+// Subclass is a subdivision of a PCI class
+type Subclass struct {
+ // hex-encoded PCI_ID for the device subclass
+ ID string `json:"id"`
+ // common string name for the subclass
+ Name string `json:"name"`
+ // any programming interfaces this subclass might have
+ ProgrammingInterfaces []*ProgrammingInterface `json:"programming_interfaces"`
+}
+
+// Class is the PCI class
+type Class struct {
+ // hex-encoded PCI_ID for the device class
+ ID string `json:"id"`
+ // common string name for the class
+ Name string `json:"name"`
+ // any subclasses belonging to this class
+ Subclasses []*Subclass `json:"subclasses"`
+}
+
+// Product provides information about a PCI device model
+// NOTE(jaypipes): In the hardware world, the PCI "device_id" is the identifier
+// for the product/model
+type Product struct {
+ // vendor ID for the product
+ VendorID string `json:"vendor_id"`
+ // hex-encoded PCI_ID for the product/model
+ ID string `json:"id"`
+	// common string name of the product/model
+ Name string `json:"name"`
+ // "subdevices" or "subsystems" for the product
+ Subsystems []*Product `json:"subsystems"`
+}
+
+// Vendor provides information about a device vendor
+type Vendor struct {
+ // hex-encoded PCI_ID for the vendor
+ ID string `json:"id"`
+ // common string name of the vendor
+ Name string `json:"name"`
+ // all top-level devices for the vendor
+ Products []*Product `json:"products"`
+}
+
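+// PCIDB is an in-memory database of PCI classes, vendors and products,
+// loaded from a pci.ids DB file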
+type PCIDB struct {
+ // hash of class ID -> class information
+ Classes map[string]*Class `json:"classes"`
+ // hash of vendor ID -> vendor information
+ Vendors map[string]*Vendor `json:"vendors"`
+ // hash of vendor ID + product/device ID -> product information
+ Products map[string]*Product `json:"products"`
+}
+
+// WithOption is used to represent optionally-configured settings
+type WithOption struct {
+ // Chroot is the directory that pcidb uses when attempting to discover
+ // pciids DB files
+ Chroot *string
+ // CacheOnly is mostly just useful for testing. It essentially disables
+ // looking for any non ~/.cache/pci.ids filepaths (which is useful when we
+	// want to test the fetch-from-network code paths).
+ CacheOnly *bool
+	// EnableNetworkFetch enables fetching a pci-ids DB file from a known
+	// location on the network if no local pci-ids DB files can be found.
+ EnableNetworkFetch *bool
+ // Path points to the absolute path of a pci.ids file in a non-standard
+ // location.
+ Path *string
+}
+
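+// WithChroot returns a WithOption that overrides the root directory pcidb
+// uses when discovering pci-ids DB files on the host.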
+func WithChroot(dir string) *WithOption {
+ return &WithOption{Chroot: &dir}
+}
+
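+// WithCacheOnly returns a WithOption that limits the search to the local
+// cache file (~/.cache/pci.ids); mostly useful for testing.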
+func WithCacheOnly() *WithOption {
+ return &WithOption{CacheOnly: &trueVar}
+}
+
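+// WithDirectPath returns a WithOption that points pcidb at a pci.ids file
+// in a non-standard location.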
+func WithDirectPath(path string) *WithOption {
+ return &WithOption{Path: &path}
+}
+
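+// WithEnableNetworkFetch returns a WithOption that allows fetching a
+// pci-ids DB file from the network when none is found locally.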
+func WithEnableNetworkFetch() *WithOption {
+ return &WithOption{EnableNetworkFetch: &trueVar}
+}
+
+func mergeOptions(opts ...*WithOption) *WithOption {
+ // Grab options from the environs by default
+ defaultChroot := "/"
+ if val, exists := os.LookupEnv("PCIDB_CHROOT"); exists {
+ defaultChroot = val
+ }
+ path := ""
+ if val, exists := os.LookupEnv("PCIDB_PATH"); exists {
+ path = val
+ }
+ defaultCacheOnly := false
+ if val, exists := os.LookupEnv("PCIDB_CACHE_ONLY"); exists {
+ if parsed, err := strconv.ParseBool(val); err != nil {
+ fmt.Fprintf(
+ os.Stderr,
+ "Failed parsing a bool from PCIDB_CACHE_ONLY "+
+					"environ value of %s\n",
+ val,
+ )
+ } else if parsed {
+ defaultCacheOnly = parsed
+ }
+ }
+ defaultEnableNetworkFetch := false
+ if val, exists := os.LookupEnv("PCIDB_ENABLE_NETWORK_FETCH"); exists {
+ if parsed, err := strconv.ParseBool(val); err != nil {
+ fmt.Fprintf(
+ os.Stderr,
+ "Failed parsing a bool from PCIDB_ENABLE_NETWORK_FETCH "+
+ "environ value of %s",
+					"environ value of %s\n",
+ )
+ } else if parsed {
+ defaultEnableNetworkFetch = parsed
+ }
+ }
+
+ merged := &WithOption{}
+ for _, opt := range opts {
+ if opt.Chroot != nil {
+ merged.Chroot = opt.Chroot
+ }
+ if opt.CacheOnly != nil {
+ merged.CacheOnly = opt.CacheOnly
+ }
+ if opt.EnableNetworkFetch != nil {
+ merged.EnableNetworkFetch = opt.EnableNetworkFetch
+ }
+ if opt.Path != nil {
+ merged.Path = opt.Path
+ }
+ }
+ // Set the default value if missing from merged
+ if merged.Chroot == nil {
+ merged.Chroot = &defaultChroot
+ }
+ if merged.CacheOnly == nil {
+ merged.CacheOnly = &defaultCacheOnly
+ }
+ if merged.EnableNetworkFetch == nil {
+ merged.EnableNetworkFetch = &defaultEnableNetworkFetch
+ }
+ if merged.Path == nil {
+ merged.Path = &path
+ }
+ return merged
+}
+
+// New returns a pointer to a PCIDB struct which contains information you can
+// use to query PCI vendor, product and class information. It accepts zero or
+// more pointers to WithOption structs. If you want to modify the behaviour of
+// pcidb, use one of the option modifiers when calling New. For example, to
+// change the root directory that pcidb uses when discovering pciids DB files,
+// call New(WithChroot("/my/root/override"))
+func New(opts ...*WithOption) (*PCIDB, error) {
+ ctx := contextFromOptions(mergeOptions(opts...))
+ db := &PCIDB{}
+ if err := db.load(ctx); err != nil {
+ return nil, err
+ }
+ return db, nil
+}
diff --git a/vendor/github.com/jaypipes/pcidb/parse.go b/vendor/github.com/jaypipes/pcidb/parse.go
new file mode 100644
index 0000000000..0fee5fe5e0
--- /dev/null
+++ b/vendor/github.com/jaypipes/pcidb/parse.go
@@ -0,0 +1,163 @@
+//
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+//
+
+package pcidb
+
+import (
+ "bufio"
+ "strings"
+)
+
+func parseDBFile(db *PCIDB, scanner *bufio.Scanner) error {
+ inClassBlock := false
+ db.Classes = make(map[string]*Class, 20)
+ db.Vendors = make(map[string]*Vendor, 200)
+ db.Products = make(map[string]*Product, 1000)
+ subclasses := make([]*Subclass, 0)
+ progIfaces := make([]*ProgrammingInterface, 0)
+ var curClass *Class
+ var curSubclass *Subclass
+ var curProgIface *ProgrammingInterface
+ vendorProducts := make([]*Product, 0)
+ var curVendor *Vendor
+ var curProduct *Product
+ var curSubsystem *Product
+ productSubsystems := make([]*Product, 0)
+ for scanner.Scan() {
+ line := scanner.Text()
+ // skip comments and blank lines
+ if line == "" || strings.HasPrefix(line, "#") {
+ continue
+ }
+ lineBytes := []rune(line)
+
+ // Lines starting with an uppercase "C" indicate a PCI top-level class
+		// information block. These lines look like this:
+ //
+ // C 02 Network controller
+ if lineBytes[0] == 'C' {
+ if curClass != nil {
+ // finalize existing class because we found a new class block
+ curClass.Subclasses = subclasses
+ subclasses = make([]*Subclass, 0)
+ }
+ inClassBlock = true
+ classID := string(lineBytes[2:4])
+ className := string(lineBytes[6:])
+ curClass = &Class{
+ ID: classID,
+ Name: className,
+ Subclasses: subclasses,
+ }
+ db.Classes[curClass.ID] = curClass
+ continue
+ }
+
+ // Lines not beginning with an uppercase "C" or a TAB character
+		// indicate a top-level vendor information block. These lines look like
+ // this:
+ //
+ // 0a89 BREA Technologies Inc
+ if lineBytes[0] != '\t' {
+ if curVendor != nil {
+ // finalize existing vendor because we found a new vendor block
+ curVendor.Products = vendorProducts
+ vendorProducts = make([]*Product, 0)
+ }
+ inClassBlock = false
+ vendorID := string(lineBytes[0:4])
+ vendorName := string(lineBytes[6:])
+ curVendor = &Vendor{
+ ID: vendorID,
+ Name: vendorName,
+ Products: vendorProducts,
+ }
+ db.Vendors[curVendor.ID] = curVendor
+ continue
+ }
+
+		// Lines beginning with only a single TAB character are *either* a
+		// subclass OR are a device information block. If we're in a class
+		// block (i.e. the last parsed block header was for a PCI class), then
+		// we parse a subclass block. Otherwise, we parse a device information
+		// block.
+		//
+		// A subclass information block looks like this:
+		//
+		// \t00 Non-VGA unclassified device
+		//
+		// A device information block looks like this:
+		//
+		// \t0002 PCI to MCA Bridge
+ if len(lineBytes) > 1 && lineBytes[1] != '\t' {
+ if inClassBlock {
+ if curSubclass != nil {
+ // finalize existing subclass because we found a new subclass block
+ curSubclass.ProgrammingInterfaces = progIfaces
+ progIfaces = make([]*ProgrammingInterface, 0)
+ }
+ subclassID := string(lineBytes[1:3])
+ subclassName := string(lineBytes[5:])
+ curSubclass = &Subclass{
+ ID: subclassID,
+ Name: subclassName,
+ ProgrammingInterfaces: progIfaces,
+ }
+ subclasses = append(subclasses, curSubclass)
+ } else {
+ if curProduct != nil {
+ // finalize existing product because we found a new product block
+ curProduct.Subsystems = productSubsystems
+ productSubsystems = make([]*Product, 0)
+ }
+ productID := string(lineBytes[1:5])
+ productName := string(lineBytes[7:])
+ productKey := curVendor.ID + productID
+ curProduct = &Product{
+ VendorID: curVendor.ID,
+ ID: productID,
+ Name: productName,
+ }
+ vendorProducts = append(vendorProducts, curProduct)
+ db.Products[productKey] = curProduct
+ }
+ } else {
+ // Lines beginning with two TAB characters are *either* a subsystem
+ // (subdevice) OR are a programming interface for a PCI device
+ // subclass. If we're in a class block (i.e. the last parsed block
+ // header was for a PCI class), then we parse a programming
+ // interface block, otherwise we parse a subsystem block.
+ //
+ // A programming interface block looks like this:
+ //
+ // \t\t00 UHCI
+ //
+ // A subsystem block looks like this:
+ //
+ // \t\t0e11 4091 Smart Array 6i
+ if inClassBlock {
+ progIfaceID := string(lineBytes[2:4])
+ progIfaceName := string(lineBytes[6:])
+ curProgIface = &ProgrammingInterface{
+ ID: progIfaceID,
+ Name: progIfaceName,
+ }
+ progIfaces = append(progIfaces, curProgIface)
+ } else {
+ vendorID := string(lineBytes[2:6])
+ subsystemID := string(lineBytes[7:11])
+ subsystemName := string(lineBytes[13:])
+ curSubsystem = &Product{
+ VendorID: vendorID,
+ ID: subsystemID,
+ Name: subsystemName,
+ }
+ productSubsystems = append(productSubsystems, curSubsystem)
+ }
+ }
+ }
+ return nil
+}
diff --git a/vendor/github.com/mitchellh/go-homedir/LICENSE b/vendor/github.com/mitchellh/go-homedir/LICENSE
new file mode 100644
index 0000000000..f9c841a51e
--- /dev/null
+++ b/vendor/github.com/mitchellh/go-homedir/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2013 Mitchell Hashimoto
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/github.com/mitchellh/go-homedir/README.md b/vendor/github.com/mitchellh/go-homedir/README.md
new file mode 100644
index 0000000000..d70706d5b3
--- /dev/null
+++ b/vendor/github.com/mitchellh/go-homedir/README.md
@@ -0,0 +1,14 @@
+# go-homedir
+
+This is a Go library for detecting the user's home directory without
+the use of cgo, so the library can be used in cross-compilation environments.
+
+Usage is incredibly simple: just call `homedir.Dir()` to get the home directory
+for a user, and `homedir.Expand()` to expand the `~` in a path to the home
+directory.
+
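+A minimal sketch of both calls (the expanded path below is illustrative):
+
+```go
+package main
+
+import (
+	"fmt"
+
+	homedir "github.com/mitchellh/go-homedir"
+)
+
+func main() {
+	home, err := homedir.Dir()
+	if err != nil {
+		fmt.Println(err)
+		return
+	}
+	expanded, err := homedir.Expand("~/.cache/pci.ids")
+	if err != nil {
+		fmt.Println(err)
+		return
+	}
+	fmt.Println(home, expanded)
+}
+```
+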
+**Why not just use `os/user`?** The built-in `os/user` package requires
+cgo on Darwin systems. This means that any Go code that uses that package
+cannot cross compile. But 99% of the time the use for `os/user` is just to
+retrieve the home directory, which we can do for the current user without
+cgo. This library does that, enabling cross-compilation.
diff --git a/vendor/github.com/mitchellh/go-homedir/homedir.go b/vendor/github.com/mitchellh/go-homedir/homedir.go
new file mode 100644
index 0000000000..25378537ea
--- /dev/null
+++ b/vendor/github.com/mitchellh/go-homedir/homedir.go
@@ -0,0 +1,167 @@
+package homedir
+
+import (
+ "bytes"
+ "errors"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "runtime"
+ "strconv"
+ "strings"
+ "sync"
+)
+
+// DisableCache will disable caching of the home directory. Caching is enabled
+// by default.
+var DisableCache bool
+
+var homedirCache string
+var cacheLock sync.RWMutex
+
+// Dir returns the home directory for the executing user.
+//
+// This uses an OS-specific method for discovering the home directory.
+// An error is returned if a home directory cannot be detected.
+func Dir() (string, error) {
+ if !DisableCache {
+ cacheLock.RLock()
+ cached := homedirCache
+ cacheLock.RUnlock()
+ if cached != "" {
+ return cached, nil
+ }
+ }
+
+ cacheLock.Lock()
+ defer cacheLock.Unlock()
+
+ var result string
+ var err error
+ if runtime.GOOS == "windows" {
+ result, err = dirWindows()
+ } else {
+ // Unix-like system, so just assume Unix
+ result, err = dirUnix()
+ }
+
+ if err != nil {
+ return "", err
+ }
+ homedirCache = result
+ return result, nil
+}
+
+// Expand expands the path to include the home directory if the path
+// is prefixed with `~`. If it isn't prefixed with `~`, the path is
+// returned as-is.
+func Expand(path string) (string, error) {
+ if len(path) == 0 {
+ return path, nil
+ }
+
+ if path[0] != '~' {
+ return path, nil
+ }
+
+ if len(path) > 1 && path[1] != '/' && path[1] != '\\' {
+ return "", errors.New("cannot expand user-specific home dir")
+ }
+
+ dir, err := Dir()
+ if err != nil {
+ return "", err
+ }
+
+ return filepath.Join(dir, path[1:]), nil
+}
+
+// Reset clears the cache, forcing the next call to Dir to re-detect
+// the home directory. This generally never has to be called, but can be
+// useful in tests if you're modifying the home directory via the HOME
+// env var or something.
+func Reset() {
+ cacheLock.Lock()
+ defer cacheLock.Unlock()
+ homedirCache = ""
+}
+
+func dirUnix() (string, error) {
+ homeEnv := "HOME"
+ if runtime.GOOS == "plan9" {
+ // On plan9, env vars are lowercase.
+ homeEnv = "home"
+ }
+
+ // First prefer the HOME environmental variable
+ if home := os.Getenv(homeEnv); home != "" {
+ return home, nil
+ }
+
+ var stdout bytes.Buffer
+
+ // If that fails, try OS specific commands
+ if runtime.GOOS == "darwin" {
+ cmd := exec.Command("sh", "-c", `dscl -q . -read /Users/"$(whoami)" NFSHomeDirectory | sed 's/^[^ ]*: //'`)
+ cmd.Stdout = &stdout
+ if err := cmd.Run(); err == nil {
+ result := strings.TrimSpace(stdout.String())
+ if result != "" {
+ return result, nil
+ }
+ }
+ } else {
+ cmd := exec.Command("getent", "passwd", strconv.Itoa(os.Getuid()))
+ cmd.Stdout = &stdout
+ if err := cmd.Run(); err != nil {
+ // If the error is ErrNotFound, we ignore it. Otherwise, return it.
+ if err != exec.ErrNotFound {
+ return "", err
+ }
+ } else {
+ if passwd := strings.TrimSpace(stdout.String()); passwd != "" {
+ // username:password:uid:gid:gecos:home:shell
+ passwdParts := strings.SplitN(passwd, ":", 7)
+ if len(passwdParts) > 5 {
+ return passwdParts[5], nil
+ }
+ }
+ }
+ }
+
+ // If all else fails, try the shell
+ stdout.Reset()
+ cmd := exec.Command("sh", "-c", "cd && pwd")
+ cmd.Stdout = &stdout
+ if err := cmd.Run(); err != nil {
+ return "", err
+ }
+
+ result := strings.TrimSpace(stdout.String())
+ if result == "" {
+ return "", errors.New("blank output when reading home directory")
+ }
+
+ return result, nil
+}
+
+func dirWindows() (string, error) {
+ // First prefer the HOME environmental variable
+ if home := os.Getenv("HOME"); home != "" {
+ return home, nil
+ }
+
+ // Prefer standard environment variable USERPROFILE
+ if home := os.Getenv("USERPROFILE"); home != "" {
+ return home, nil
+ }
+
+ drive := os.Getenv("HOMEDRIVE")
+ path := os.Getenv("HOMEPATH")
+ home := drive + path
+ if drive == "" || path == "" {
+ return "", errors.New("HOMEDRIVE, HOMEPATH, or USERPROFILE are blank")
+ }
+
+ return home, nil
+}
diff --git a/vendor/gopkg.in/yaml.v2/.travis.yml b/vendor/gopkg.in/yaml.v2/.travis.yml
new file mode 100644
index 0000000000..7348c50c0c
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/.travis.yml
@@ -0,0 +1,17 @@
+language: go
+
+go:
+ - "1.4.x"
+ - "1.5.x"
+ - "1.6.x"
+ - "1.7.x"
+ - "1.8.x"
+ - "1.9.x"
+ - "1.10.x"
+ - "1.11.x"
+ - "1.12.x"
+ - "1.13.x"
+ - "1.14.x"
+ - "tip"
+
+go_import_path: gopkg.in/yaml.v2
diff --git a/vendor/gopkg.in/yaml.v2/LICENSE b/vendor/gopkg.in/yaml.v2/LICENSE
new file mode 100644
index 0000000000..8dada3edaf
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright {yyyy} {name of copyright owner}
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/gopkg.in/yaml.v2/LICENSE.libyaml b/vendor/gopkg.in/yaml.v2/LICENSE.libyaml
new file mode 100644
index 0000000000..8da58fbf6f
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/LICENSE.libyaml
@@ -0,0 +1,31 @@
+The following files were ported to Go from C files of libyaml, and thus
+are still covered by their original copyright and license:
+
+ apic.go
+ emitterc.go
+ parserc.go
+ readerc.go
+ scannerc.go
+ writerc.go
+ yamlh.go
+ yamlprivateh.go
+
+Copyright (c) 2006 Kirill Simonov
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/gopkg.in/yaml.v2/NOTICE b/vendor/gopkg.in/yaml.v2/NOTICE
new file mode 100644
index 0000000000..866d74a7ad
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/NOTICE
@@ -0,0 +1,13 @@
+Copyright 2011-2016 Canonical Ltd.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/vendor/gopkg.in/yaml.v2/README.md b/vendor/gopkg.in/yaml.v2/README.md
new file mode 100644
index 0000000000..b50c6e8775
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/README.md
@@ -0,0 +1,133 @@
+# YAML support for the Go language
+
+Introduction
+------------
+
+The yaml package enables Go programs to comfortably encode and decode YAML
+values. It was developed within [Canonical](https://www.canonical.com) as
+part of the [juju](https://juju.ubuntu.com) project, and is based on a
+pure Go port of the well-known [libyaml](http://pyyaml.org/wiki/LibYAML)
+C library to parse and generate YAML data quickly and reliably.
+
+Compatibility
+-------------
+
+The yaml package supports most of YAML 1.1 and 1.2, including support for
+anchors, tags, map merging, etc. Multi-document unmarshalling is not yet
+implemented, and base-60 floats from YAML 1.1 are purposefully not
+supported since they're a poor design and are gone in YAML 1.2.
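+
+For instance, anchors and merge keys decode with no extra configuration.
+A minimal sketch:
+
+```Go
+package main
+
+import (
+	"fmt"
+	"log"
+
+	"gopkg.in/yaml.v2"
+)
+
+var doc = `
+defaults: &defaults
+  adapter: postgres
+  host: localhost
+development:
+  <<: *defaults
+  database: dev_db
+`
+
+func main() {
+	var m map[string]map[string]string
+	if err := yaml.Unmarshal([]byte(doc), &m); err != nil {
+		log.Fatalf("error: %v", err)
+	}
+	// The merge key pulls adapter and host in from the anchored mapping.
+	fmt.Println(m["development"]["adapter"], m["development"]["database"])
+}
+```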
+
+Installation and usage
+----------------------
+
+The import path for the package is *gopkg.in/yaml.v2*.
+
+To install it, run:
+
+ go get gopkg.in/yaml.v2
+
+API documentation
+-----------------
+
+If opened in a browser, the import path itself leads to the API documentation:
+
+ * [https://gopkg.in/yaml.v2](https://gopkg.in/yaml.v2)
+
+API stability
+-------------
+
+The package API for yaml v2 will remain stable as described in [gopkg.in](https://gopkg.in).
+
+
+License
+-------
+
+The yaml package is licensed under the Apache License 2.0. Please see the LICENSE file for details.
+
+
+Example
+-------
+
+```Go
+package main
+
+import (
+ "fmt"
+ "log"
+
+ "gopkg.in/yaml.v2"
+)
+
+var data = `
+a: Easy!
+b:
+ c: 2
+ d: [3, 4]
+`
+
+// Note: struct fields must be public in order for unmarshal to
+// correctly populate the data.
+type T struct {
+ A string
+ B struct {
+ RenamedC int `yaml:"c"`
+ D []int `yaml:",flow"`
+ }
+}
+
+func main() {
+ t := T{}
+
+ err := yaml.Unmarshal([]byte(data), &t)
+ if err != nil {
+ log.Fatalf("error: %v", err)
+ }
+ fmt.Printf("--- t:\n%v\n\n", t)
+
+ d, err := yaml.Marshal(&t)
+ if err != nil {
+ log.Fatalf("error: %v", err)
+ }
+ fmt.Printf("--- t dump:\n%s\n\n", string(d))
+
+ m := make(map[interface{}]interface{})
+
+ err = yaml.Unmarshal([]byte(data), &m)
+ if err != nil {
+ log.Fatalf("error: %v", err)
+ }
+ fmt.Printf("--- m:\n%v\n\n", m)
+
+ d, err = yaml.Marshal(&m)
+ if err != nil {
+ log.Fatalf("error: %v", err)
+ }
+ fmt.Printf("--- m dump:\n%s\n\n", string(d))
+}
+```
+
+This example will generate the following output:
+
+```
+--- t:
+{Easy! {2 [3 4]}}
+
+--- t dump:
+a: Easy!
+b:
+ c: 2
+ d: [3, 4]
+
+
+--- m:
+map[a:Easy! b:map[c:2 d:[3 4]]]
+
+--- m dump:
+a: Easy!
+b:
+ c: 2
+ d:
+ - 3
+ - 4
+```
+
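+Note that `d` keeps its `[3, 4]` flow layout in the struct dump because of
+the `yaml:",flow"` tag, while the generic map is dumped in block style.
+
+The package also offers a strict mode, `UnmarshalStrict`, which reports
+keys that have no matching field (or are set twice) instead of silently
+ignoring them. A minimal sketch:
+
+```Go
+package main
+
+import (
+	"fmt"
+
+	"gopkg.in/yaml.v2"
+)
+
+type Config struct {
+	Name string
+}
+
+func main() {
+	// "nmae" is a typo: plain Unmarshal would drop it silently, while
+	// UnmarshalStrict returns a *yaml.TypeError describing the problem.
+	err := yaml.UnmarshalStrict([]byte("nmae: skywire"), &Config{})
+	fmt.Println(err)
+}
+```
+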
diff --git a/vendor/gopkg.in/yaml.v2/apic.go b/vendor/gopkg.in/yaml.v2/apic.go
new file mode 100644
index 0000000000..acf71402cf
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/apic.go
@@ -0,0 +1,744 @@
+package yaml
+
+import (
+ "io"
+)
+
+func yaml_insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) {
+ //fmt.Println("yaml_insert_token", "pos:", pos, "typ:", token.typ, "head:", parser.tokens_head, "len:", len(parser.tokens))
+
+ // Check if we can move the queue at the beginning of the buffer.
+ if parser.tokens_head > 0 && len(parser.tokens) == cap(parser.tokens) {
+ if parser.tokens_head != len(parser.tokens) {
+ copy(parser.tokens, parser.tokens[parser.tokens_head:])
+ }
+ parser.tokens = parser.tokens[:len(parser.tokens)-parser.tokens_head]
+ parser.tokens_head = 0
+ }
+ parser.tokens = append(parser.tokens, *token)
+ if pos < 0 {
+ return
+ }
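+	// The new token was appended at the tail; shift everything at and after
+	// pos one slot right, then drop the token into the requested position.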
+ copy(parser.tokens[parser.tokens_head+pos+1:], parser.tokens[parser.tokens_head+pos:])
+ parser.tokens[parser.tokens_head+pos] = *token
+}
+
+// Create a new parser object.
+func yaml_parser_initialize(parser *yaml_parser_t) bool {
+ *parser = yaml_parser_t{
+ raw_buffer: make([]byte, 0, input_raw_buffer_size),
+ buffer: make([]byte, 0, input_buffer_size),
+ }
+ return true
+}
+
+// Destroy a parser object.
+func yaml_parser_delete(parser *yaml_parser_t) {
+ *parser = yaml_parser_t{}
+}
+
+// String read handler.
+func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) {
+ if parser.input_pos == len(parser.input) {
+ return 0, io.EOF
+ }
+ n = copy(buffer, parser.input[parser.input_pos:])
+ parser.input_pos += n
+ return n, nil
+}
+
+// Reader read handler.
+func yaml_reader_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) {
+ return parser.input_reader.Read(buffer)
+}
+
+// Set a string input.
+func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) {
+ if parser.read_handler != nil {
+ panic("must set the input source only once")
+ }
+ parser.read_handler = yaml_string_read_handler
+ parser.input = input
+ parser.input_pos = 0
+}
+
+// Set a file input.
+func yaml_parser_set_input_reader(parser *yaml_parser_t, r io.Reader) {
+ if parser.read_handler != nil {
+ panic("must set the input source only once")
+ }
+ parser.read_handler = yaml_reader_read_handler
+ parser.input_reader = r
+}
+
+// Set the source encoding.
+func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) {
+ if parser.encoding != yaml_ANY_ENCODING {
+ panic("must set the encoding only once")
+ }
+ parser.encoding = encoding
+}
+
+var disableLineWrapping = false
+
+// Create a new emitter object.
+func yaml_emitter_initialize(emitter *yaml_emitter_t) {
+ *emitter = yaml_emitter_t{
+ buffer: make([]byte, output_buffer_size),
+ raw_buffer: make([]byte, 0, output_raw_buffer_size),
+ states: make([]yaml_emitter_state_t, 0, initial_stack_size),
+ events: make([]yaml_event_t, 0, initial_queue_size),
+ }
+ if disableLineWrapping {
+ emitter.best_width = -1
+ }
+}
+
+// Destroy an emitter object.
+func yaml_emitter_delete(emitter *yaml_emitter_t) {
+ *emitter = yaml_emitter_t{}
+}
+
+// String write handler.
+func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
+ *emitter.output_buffer = append(*emitter.output_buffer, buffer...)
+ return nil
+}
+
+// yaml_writer_write_handler uses emitter.output_writer to write the
+// emitted text.
+func yaml_writer_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
+ _, err := emitter.output_writer.Write(buffer)
+ return err
+}
+
+// Set a string output.
+func yaml_emitter_set_output_string(emitter *yaml_emitter_t, output_buffer *[]byte) {
+ if emitter.write_handler != nil {
+ panic("must set the output target only once")
+ }
+ emitter.write_handler = yaml_string_write_handler
+ emitter.output_buffer = output_buffer
+}
+
+// Set a file output.
+func yaml_emitter_set_output_writer(emitter *yaml_emitter_t, w io.Writer) {
+ if emitter.write_handler != nil {
+ panic("must set the output target only once")
+ }
+ emitter.write_handler = yaml_writer_write_handler
+ emitter.output_writer = w
+}
+
+// Set the output encoding.
+func yaml_emitter_set_encoding(emitter *yaml_emitter_t, encoding yaml_encoding_t) {
+ if emitter.encoding != yaml_ANY_ENCODING {
+ panic("must set the output encoding only once")
+ }
+ emitter.encoding = encoding
+}
+
+// Set the canonical output style.
+func yaml_emitter_set_canonical(emitter *yaml_emitter_t, canonical bool) {
+ emitter.canonical = canonical
+}
+
+// Set the indentation increment.
+func yaml_emitter_set_indent(emitter *yaml_emitter_t, indent int) {
+ if indent < 2 || indent > 9 {
+ indent = 2
+ }
+ emitter.best_indent = indent
+}
+
+// Set the preferred line width.
+func yaml_emitter_set_width(emitter *yaml_emitter_t, width int) {
+ if width < 0 {
+ width = -1
+ }
+ emitter.best_width = width
+}
+
+// Set if unescaped non-ASCII characters are allowed.
+func yaml_emitter_set_unicode(emitter *yaml_emitter_t, unicode bool) {
+ emitter.unicode = unicode
+}
+
+// Set the preferred line break character.
+func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) {
+ emitter.line_break = line_break
+}
+
+///*
+// * Destroy a token object.
+// */
+//
+//YAML_DECLARE(void)
+//yaml_token_delete(yaml_token_t *token)
+//{
+// assert(token); // Non-NULL token object expected.
+//
+// switch (token.type)
+// {
+// case YAML_TAG_DIRECTIVE_TOKEN:
+// yaml_free(token.data.tag_directive.handle);
+// yaml_free(token.data.tag_directive.prefix);
+// break;
+//
+// case YAML_ALIAS_TOKEN:
+// yaml_free(token.data.alias.value);
+// break;
+//
+// case YAML_ANCHOR_TOKEN:
+// yaml_free(token.data.anchor.value);
+// break;
+//
+// case YAML_TAG_TOKEN:
+// yaml_free(token.data.tag.handle);
+// yaml_free(token.data.tag.suffix);
+// break;
+//
+// case YAML_SCALAR_TOKEN:
+// yaml_free(token.data.scalar.value);
+// break;
+//
+// default:
+// break;
+// }
+//
+// memset(token, 0, sizeof(yaml_token_t));
+//}
+//
+///*
+// * Check if a string is a valid UTF-8 sequence.
+// *
+// * Check 'reader.c' for more details on UTF-8 encoding.
+// */
+//
+//static int
+//yaml_check_utf8(yaml_char_t *start, size_t length)
+//{
+// yaml_char_t *end = start+length;
+// yaml_char_t *pointer = start;
+//
+// while (pointer < end) {
+// unsigned char octet;
+// unsigned int width;
+// unsigned int value;
+// size_t k;
+//
+// octet = pointer[0];
+// width = (octet & 0x80) == 0x00 ? 1 :
+// (octet & 0xE0) == 0xC0 ? 2 :
+// (octet & 0xF0) == 0xE0 ? 3 :
+// (octet & 0xF8) == 0xF0 ? 4 : 0;
+// value = (octet & 0x80) == 0x00 ? octet & 0x7F :
+// (octet & 0xE0) == 0xC0 ? octet & 0x1F :
+// (octet & 0xF0) == 0xE0 ? octet & 0x0F :
+// (octet & 0xF8) == 0xF0 ? octet & 0x07 : 0;
+// if (!width) return 0;
+// if (pointer+width > end) return 0;
+// for (k = 1; k < width; k ++) {
+// octet = pointer[k];
+// if ((octet & 0xC0) != 0x80) return 0;
+// value = (value << 6) + (octet & 0x3F);
+// }
+// if (!((width == 1) ||
+// (width == 2 && value >= 0x80) ||
+// (width == 3 && value >= 0x800) ||
+// (width == 4 && value >= 0x10000))) return 0;
+//
+// pointer += width;
+// }
+//
+// return 1;
+//}
+//
+
+// Create STREAM-START.
+func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) {
+ *event = yaml_event_t{
+ typ: yaml_STREAM_START_EVENT,
+ encoding: encoding,
+ }
+}
+
+// Create STREAM-END.
+func yaml_stream_end_event_initialize(event *yaml_event_t) {
+ *event = yaml_event_t{
+ typ: yaml_STREAM_END_EVENT,
+ }
+}
+
+// Create DOCUMENT-START.
+func yaml_document_start_event_initialize(
+ event *yaml_event_t,
+ version_directive *yaml_version_directive_t,
+ tag_directives []yaml_tag_directive_t,
+ implicit bool,
+) {
+ *event = yaml_event_t{
+ typ: yaml_DOCUMENT_START_EVENT,
+ version_directive: version_directive,
+ tag_directives: tag_directives,
+ implicit: implicit,
+ }
+}
+
+// Create DOCUMENT-END.
+func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) {
+ *event = yaml_event_t{
+ typ: yaml_DOCUMENT_END_EVENT,
+ implicit: implicit,
+ }
+}
+
+///*
+// * Create ALIAS.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_alias_event_initialize(event *yaml_event_t, anchor *yaml_char_t)
+//{
+// mark yaml_mark_t = { 0, 0, 0 }
+// anchor_copy *yaml_char_t = NULL
+//
+// assert(event) // Non-NULL event object is expected.
+// assert(anchor) // Non-NULL anchor is expected.
+//
+// if (!yaml_check_utf8(anchor, strlen((char *)anchor))) return 0
+//
+// anchor_copy = yaml_strdup(anchor)
+// if (!anchor_copy)
+// return 0
+//
+// ALIAS_EVENT_INIT(*event, anchor_copy, mark, mark)
+//
+// return 1
+//}
+
+// Create SCALAR.
+func yaml_scalar_event_initialize(event *yaml_event_t, anchor, tag, value []byte, plain_implicit, quoted_implicit bool, style yaml_scalar_style_t) bool {
+ *event = yaml_event_t{
+ typ: yaml_SCALAR_EVENT,
+ anchor: anchor,
+ tag: tag,
+ value: value,
+ implicit: plain_implicit,
+ quoted_implicit: quoted_implicit,
+ style: yaml_style_t(style),
+ }
+ return true
+}
+
+// Create SEQUENCE-START.
+func yaml_sequence_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_sequence_style_t) bool {
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_START_EVENT,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ style: yaml_style_t(style),
+ }
+ return true
+}
+
+// Create SEQUENCE-END.
+func yaml_sequence_end_event_initialize(event *yaml_event_t) bool {
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_END_EVENT,
+ }
+ return true
+}
+
+// Create MAPPING-START.
+func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) {
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_START_EVENT,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ style: yaml_style_t(style),
+ }
+}
+
+// Create MAPPING-END.
+func yaml_mapping_end_event_initialize(event *yaml_event_t) {
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_END_EVENT,
+ }
+}
+
+// Destroy an event object.
+func yaml_event_delete(event *yaml_event_t) {
+ *event = yaml_event_t{}
+}
+
+///*
+// * Create a document object.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_initialize(document *yaml_document_t,
+// version_directive *yaml_version_directive_t,
+// tag_directives_start *yaml_tag_directive_t,
+// tag_directives_end *yaml_tag_directive_t,
+// start_implicit int, end_implicit int)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+// struct {
+// start *yaml_node_t
+// end *yaml_node_t
+// top *yaml_node_t
+// } nodes = { NULL, NULL, NULL }
+// version_directive_copy *yaml_version_directive_t = NULL
+// struct {
+// start *yaml_tag_directive_t
+// end *yaml_tag_directive_t
+// top *yaml_tag_directive_t
+// } tag_directives_copy = { NULL, NULL, NULL }
+// value yaml_tag_directive_t = { NULL, NULL }
+// mark yaml_mark_t = { 0, 0, 0 }
+//
+// assert(document) // Non-NULL document object is expected.
+// assert((tag_directives_start && tag_directives_end) ||
+// (tag_directives_start == tag_directives_end))
+// // Valid tag directives are expected.
+//
+// if (!STACK_INIT(&context, nodes, INITIAL_STACK_SIZE)) goto error
+//
+// if (version_directive) {
+// version_directive_copy = yaml_malloc(sizeof(yaml_version_directive_t))
+// if (!version_directive_copy) goto error
+// version_directive_copy.major = version_directive.major
+// version_directive_copy.minor = version_directive.minor
+// }
+//
+// if (tag_directives_start != tag_directives_end) {
+// tag_directive *yaml_tag_directive_t
+// if (!STACK_INIT(&context, tag_directives_copy, INITIAL_STACK_SIZE))
+// goto error
+// for (tag_directive = tag_directives_start
+// tag_directive != tag_directives_end; tag_directive ++) {
+// assert(tag_directive.handle)
+// assert(tag_directive.prefix)
+// if (!yaml_check_utf8(tag_directive.handle,
+// strlen((char *)tag_directive.handle)))
+// goto error
+// if (!yaml_check_utf8(tag_directive.prefix,
+// strlen((char *)tag_directive.prefix)))
+// goto error
+// value.handle = yaml_strdup(tag_directive.handle)
+// value.prefix = yaml_strdup(tag_directive.prefix)
+// if (!value.handle || !value.prefix) goto error
+// if (!PUSH(&context, tag_directives_copy, value))
+// goto error
+// value.handle = NULL
+// value.prefix = NULL
+// }
+// }
+//
+// DOCUMENT_INIT(*document, nodes.start, nodes.end, version_directive_copy,
+// tag_directives_copy.start, tag_directives_copy.top,
+// start_implicit, end_implicit, mark, mark)
+//
+// return 1
+//
+//error:
+// STACK_DEL(&context, nodes)
+// yaml_free(version_directive_copy)
+// while (!STACK_EMPTY(&context, tag_directives_copy)) {
+// value yaml_tag_directive_t = POP(&context, tag_directives_copy)
+// yaml_free(value.handle)
+// yaml_free(value.prefix)
+// }
+// STACK_DEL(&context, tag_directives_copy)
+// yaml_free(value.handle)
+// yaml_free(value.prefix)
+//
+// return 0
+//}
+//
+///*
+// * Destroy a document object.
+// */
+//
+//YAML_DECLARE(void)
+//yaml_document_delete(document *yaml_document_t)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+// tag_directive *yaml_tag_directive_t
+//
+// context.error = YAML_NO_ERROR // Eliminate a compiler warning.
+//
+// assert(document) // Non-NULL document object is expected.
+//
+// while (!STACK_EMPTY(&context, document.nodes)) {
+// node yaml_node_t = POP(&context, document.nodes)
+// yaml_free(node.tag)
+// switch (node.type) {
+// case YAML_SCALAR_NODE:
+// yaml_free(node.data.scalar.value)
+// break
+// case YAML_SEQUENCE_NODE:
+// STACK_DEL(&context, node.data.sequence.items)
+// break
+// case YAML_MAPPING_NODE:
+// STACK_DEL(&context, node.data.mapping.pairs)
+// break
+// default:
+// assert(0) // Should not happen.
+// }
+// }
+// STACK_DEL(&context, document.nodes)
+//
+// yaml_free(document.version_directive)
+// for (tag_directive = document.tag_directives.start
+// tag_directive != document.tag_directives.end
+// tag_directive++) {
+// yaml_free(tag_directive.handle)
+// yaml_free(tag_directive.prefix)
+// }
+// yaml_free(document.tag_directives.start)
+//
+// memset(document, 0, sizeof(yaml_document_t))
+//}
+//
+///**
+// * Get a document node.
+// */
+//
+//YAML_DECLARE(yaml_node_t *)
+//yaml_document_get_node(document *yaml_document_t, index int)
+//{
+// assert(document) // Non-NULL document object is expected.
+//
+// if (index > 0 && document.nodes.start + index <= document.nodes.top) {
+// return document.nodes.start + index - 1
+// }
+// return NULL
+//}
+//
+///**
+// * Get the root object.
+// */
+//
+//YAML_DECLARE(yaml_node_t *)
+//yaml_document_get_root_node(document *yaml_document_t)
+//{
+// assert(document) // Non-NULL document object is expected.
+//
+// if (document.nodes.top != document.nodes.start) {
+// return document.nodes.start
+// }
+// return NULL
+//}
+//
+///*
+// * Add a scalar node to a document.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_add_scalar(document *yaml_document_t,
+// tag *yaml_char_t, value *yaml_char_t, length int,
+// style yaml_scalar_style_t)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+// mark yaml_mark_t = { 0, 0, 0 }
+// tag_copy *yaml_char_t = NULL
+// value_copy *yaml_char_t = NULL
+// node yaml_node_t
+//
+// assert(document) // Non-NULL document object is expected.
+// assert(value) // Non-NULL value is expected.
+//
+// if (!tag) {
+// tag = (yaml_char_t *)YAML_DEFAULT_SCALAR_TAG
+// }
+//
+// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
+// tag_copy = yaml_strdup(tag)
+// if (!tag_copy) goto error
+//
+// if (length < 0) {
+// length = strlen((char *)value)
+// }
+//
+// if (!yaml_check_utf8(value, length)) goto error
+// value_copy = yaml_malloc(length+1)
+// if (!value_copy) goto error
+// memcpy(value_copy, value, length)
+// value_copy[length] = '\0'
+//
+// SCALAR_NODE_INIT(node, tag_copy, value_copy, length, style, mark, mark)
+// if (!PUSH(&context, document.nodes, node)) goto error
+//
+// return document.nodes.top - document.nodes.start
+//
+//error:
+// yaml_free(tag_copy)
+// yaml_free(value_copy)
+//
+// return 0
+//}
+//
+///*
+// * Add a sequence node to a document.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_add_sequence(document *yaml_document_t,
+// tag *yaml_char_t, style yaml_sequence_style_t)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+// mark yaml_mark_t = { 0, 0, 0 }
+// tag_copy *yaml_char_t = NULL
+// struct {
+// start *yaml_node_item_t
+// end *yaml_node_item_t
+// top *yaml_node_item_t
+// } items = { NULL, NULL, NULL }
+// node yaml_node_t
+//
+// assert(document) // Non-NULL document object is expected.
+//
+// if (!tag) {
+// tag = (yaml_char_t *)YAML_DEFAULT_SEQUENCE_TAG
+// }
+//
+// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
+// tag_copy = yaml_strdup(tag)
+// if (!tag_copy) goto error
+//
+// if (!STACK_INIT(&context, items, INITIAL_STACK_SIZE)) goto error
+//
+// SEQUENCE_NODE_INIT(node, tag_copy, items.start, items.end,
+// style, mark, mark)
+// if (!PUSH(&context, document.nodes, node)) goto error
+//
+// return document.nodes.top - document.nodes.start
+//
+//error:
+// STACK_DEL(&context, items)
+// yaml_free(tag_copy)
+//
+// return 0
+//}
+//
+///*
+// * Add a mapping node to a document.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_add_mapping(document *yaml_document_t,
+// tag *yaml_char_t, style yaml_mapping_style_t)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+// mark yaml_mark_t = { 0, 0, 0 }
+// tag_copy *yaml_char_t = NULL
+// struct {
+// start *yaml_node_pair_t
+// end *yaml_node_pair_t
+// top *yaml_node_pair_t
+// } pairs = { NULL, NULL, NULL }
+// node yaml_node_t
+//
+// assert(document) // Non-NULL document object is expected.
+//
+// if (!tag) {
+// tag = (yaml_char_t *)YAML_DEFAULT_MAPPING_TAG
+// }
+//
+// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
+// tag_copy = yaml_strdup(tag)
+// if (!tag_copy) goto error
+//
+// if (!STACK_INIT(&context, pairs, INITIAL_STACK_SIZE)) goto error
+//
+// MAPPING_NODE_INIT(node, tag_copy, pairs.start, pairs.end,
+// style, mark, mark)
+// if (!PUSH(&context, document.nodes, node)) goto error
+//
+// return document.nodes.top - document.nodes.start
+//
+//error:
+// STACK_DEL(&context, pairs)
+// yaml_free(tag_copy)
+//
+// return 0
+//}
+//
+///*
+// * Append an item to a sequence node.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_append_sequence_item(document *yaml_document_t,
+// sequence int, item int)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+//
+// assert(document) // Non-NULL document is required.
+// assert(sequence > 0
+// && document.nodes.start + sequence <= document.nodes.top)
+// // Valid sequence id is required.
+// assert(document.nodes.start[sequence-1].type == YAML_SEQUENCE_NODE)
+// // A sequence node is required.
+// assert(item > 0 && document.nodes.start + item <= document.nodes.top)
+// // Valid item id is required.
+//
+// if (!PUSH(&context,
+// document.nodes.start[sequence-1].data.sequence.items, item))
+// return 0
+//
+// return 1
+//}
+//
+///*
+// * Append a pair of a key and a value to a mapping node.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_append_mapping_pair(document *yaml_document_t,
+// mapping int, key int, value int)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+//
+// pair yaml_node_pair_t
+//
+// assert(document) // Non-NULL document is required.
+// assert(mapping > 0
+// && document.nodes.start + mapping <= document.nodes.top)
+// // Valid mapping id is required.
+// assert(document.nodes.start[mapping-1].type == YAML_MAPPING_NODE)
+// // A mapping node is required.
+// assert(key > 0 && document.nodes.start + key <= document.nodes.top)
+// // Valid key id is required.
+// assert(value > 0 && document.nodes.start + value <= document.nodes.top)
+// // Valid value id is required.
+//
+// pair.key = key
+// pair.value = value
+//
+// if (!PUSH(&context,
+// document.nodes.start[mapping-1].data.mapping.pairs, pair))
+// return 0
+//
+// return 1
+//}
+//
+//
diff --git a/vendor/gopkg.in/yaml.v2/decode.go b/vendor/gopkg.in/yaml.v2/decode.go
new file mode 100644
index 0000000000..129bc2a97d
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/decode.go
@@ -0,0 +1,815 @@
+package yaml
+
+import (
+ "encoding"
+ "encoding/base64"
+ "fmt"
+ "io"
+ "math"
+ "reflect"
+ "strconv"
+ "time"
+)
+
+const (
+ documentNode = 1 << iota
+ mappingNode
+ sequenceNode
+ scalarNode
+ aliasNode
+)
+
+type node struct {
+ kind int
+ line, column int
+ tag string
+ // For an alias node, alias holds the resolved alias.
+ alias *node
+ value string
+ implicit bool
+ children []*node
+ anchors map[string]*node
+}
+
+// ----------------------------------------------------------------------------
+// Parser, produces a node tree out of a libyaml event stream.
+
+type parser struct {
+ parser yaml_parser_t
+ event yaml_event_t
+ doc *node
+ doneInit bool
+}
+
+func newParser(b []byte) *parser {
+ p := parser{}
+ if !yaml_parser_initialize(&p.parser) {
+		panic("failed to initialize YAML parser")
+ }
+ if len(b) == 0 {
+ b = []byte{'\n'}
+ }
+ yaml_parser_set_input_string(&p.parser, b)
+ return &p
+}
+
+func newParserFromReader(r io.Reader) *parser {
+ p := parser{}
+ if !yaml_parser_initialize(&p.parser) {
+		panic("failed to initialize YAML parser")
+ }
+ yaml_parser_set_input_reader(&p.parser, r)
+ return &p
+}
+
+func (p *parser) init() {
+ if p.doneInit {
+ return
+ }
+ p.expect(yaml_STREAM_START_EVENT)
+ p.doneInit = true
+}
+
+func (p *parser) destroy() {
+ if p.event.typ != yaml_NO_EVENT {
+ yaml_event_delete(&p.event)
+ }
+ yaml_parser_delete(&p.parser)
+}
+
+// expect consumes an event from the event stream and
+// checks that it's of the expected type.
+func (p *parser) expect(e yaml_event_type_t) {
+ if p.event.typ == yaml_NO_EVENT {
+ if !yaml_parser_parse(&p.parser, &p.event) {
+ p.fail()
+ }
+ }
+ if p.event.typ == yaml_STREAM_END_EVENT {
+ failf("attempted to go past the end of stream; corrupted value?")
+ }
+ if p.event.typ != e {
+ p.parser.problem = fmt.Sprintf("expected %s event but got %s", e, p.event.typ)
+ p.fail()
+ }
+ yaml_event_delete(&p.event)
+ p.event.typ = yaml_NO_EVENT
+}
+
+// peek peeks at the next event in the event stream,
+// puts the results into p.event and returns the event type.
+func (p *parser) peek() yaml_event_type_t {
+ if p.event.typ != yaml_NO_EVENT {
+ return p.event.typ
+ }
+ if !yaml_parser_parse(&p.parser, &p.event) {
+ p.fail()
+ }
+ return p.event.typ
+}
+
+func (p *parser) fail() {
+ var where string
+ var line int
+ if p.parser.problem_mark.line != 0 {
+ line = p.parser.problem_mark.line
+		// Scanner errors don't advance the line number before returning an error.
+ if p.parser.error == yaml_SCANNER_ERROR {
+ line++
+ }
+ } else if p.parser.context_mark.line != 0 {
+ line = p.parser.context_mark.line
+ }
+ if line != 0 {
+ where = "line " + strconv.Itoa(line) + ": "
+ }
+ var msg string
+ if len(p.parser.problem) > 0 {
+ msg = p.parser.problem
+ } else {
+ msg = "unknown problem parsing YAML content"
+ }
+ failf("%s%s", where, msg)
+}
+
+func (p *parser) anchor(n *node, anchor []byte) {
+ if anchor != nil {
+ p.doc.anchors[string(anchor)] = n
+ }
+}
+
+func (p *parser) parse() *node {
+ p.init()
+ switch p.peek() {
+ case yaml_SCALAR_EVENT:
+ return p.scalar()
+ case yaml_ALIAS_EVENT:
+ return p.alias()
+ case yaml_MAPPING_START_EVENT:
+ return p.mapping()
+ case yaml_SEQUENCE_START_EVENT:
+ return p.sequence()
+ case yaml_DOCUMENT_START_EVENT:
+ return p.document()
+ case yaml_STREAM_END_EVENT:
+ // Happens when attempting to decode an empty buffer.
+ return nil
+ default:
+ panic("attempted to parse unknown event: " + p.event.typ.String())
+ }
+}
+
+func (p *parser) node(kind int) *node {
+ return &node{
+ kind: kind,
+ line: p.event.start_mark.line,
+ column: p.event.start_mark.column,
+ }
+}
+
+func (p *parser) document() *node {
+ n := p.node(documentNode)
+ n.anchors = make(map[string]*node)
+ p.doc = n
+ p.expect(yaml_DOCUMENT_START_EVENT)
+ n.children = append(n.children, p.parse())
+ p.expect(yaml_DOCUMENT_END_EVENT)
+ return n
+}
+
+func (p *parser) alias() *node {
+ n := p.node(aliasNode)
+ n.value = string(p.event.anchor)
+ n.alias = p.doc.anchors[n.value]
+ if n.alias == nil {
+ failf("unknown anchor '%s' referenced", n.value)
+ }
+ p.expect(yaml_ALIAS_EVENT)
+ return n
+}
+
+func (p *parser) scalar() *node {
+ n := p.node(scalarNode)
+ n.value = string(p.event.value)
+ n.tag = string(p.event.tag)
+ n.implicit = p.event.implicit
+ p.anchor(n, p.event.anchor)
+ p.expect(yaml_SCALAR_EVENT)
+ return n
+}
+
+func (p *parser) sequence() *node {
+ n := p.node(sequenceNode)
+ p.anchor(n, p.event.anchor)
+ p.expect(yaml_SEQUENCE_START_EVENT)
+ for p.peek() != yaml_SEQUENCE_END_EVENT {
+ n.children = append(n.children, p.parse())
+ }
+ p.expect(yaml_SEQUENCE_END_EVENT)
+ return n
+}
+
+func (p *parser) mapping() *node {
+ n := p.node(mappingNode)
+ p.anchor(n, p.event.anchor)
+ p.expect(yaml_MAPPING_START_EVENT)
+ for p.peek() != yaml_MAPPING_END_EVENT {
+ n.children = append(n.children, p.parse(), p.parse())
+ }
+ p.expect(yaml_MAPPING_END_EVENT)
+ return n
+}
+
+// ----------------------------------------------------------------------------
+// Decoder, unmarshals a node into a provided value.
+
+type decoder struct {
+ doc *node
+ aliases map[*node]bool
+ mapType reflect.Type
+ terrors []string
+ strict bool
+
+ decodeCount int
+ aliasCount int
+ aliasDepth int
+}
+
+var (
+ mapItemType = reflect.TypeOf(MapItem{})
+ durationType = reflect.TypeOf(time.Duration(0))
+ defaultMapType = reflect.TypeOf(map[interface{}]interface{}{})
+ ifaceType = defaultMapType.Elem()
+ timeType = reflect.TypeOf(time.Time{})
+ ptrTimeType = reflect.TypeOf(&time.Time{})
+)
+
+func newDecoder(strict bool) *decoder {
+ d := &decoder{mapType: defaultMapType, strict: strict}
+ d.aliases = make(map[*node]bool)
+ return d
+}
+
+func (d *decoder) terror(n *node, tag string, out reflect.Value) {
+ if n.tag != "" {
+ tag = n.tag
+ }
+ value := n.value
+ if tag != yaml_SEQ_TAG && tag != yaml_MAP_TAG {
+ if len(value) > 10 {
+ value = " `" + value[:7] + "...`"
+ } else {
+ value = " `" + value + "`"
+ }
+ }
+ d.terrors = append(d.terrors, fmt.Sprintf("line %d: cannot unmarshal %s%s into %s", n.line+1, shortTag(tag), value, out.Type()))
+}
+
+func (d *decoder) callUnmarshaler(n *node, u Unmarshaler) (good bool) {
+ terrlen := len(d.terrors)
+ err := u.UnmarshalYAML(func(v interface{}) (err error) {
+ defer handleErr(&err)
+ d.unmarshal(n, reflect.ValueOf(v))
+ if len(d.terrors) > terrlen {
+ issues := d.terrors[terrlen:]
+ d.terrors = d.terrors[:terrlen]
+ return &TypeError{issues}
+ }
+ return nil
+ })
+ if e, ok := err.(*TypeError); ok {
+ d.terrors = append(d.terrors, e.Errors...)
+ return false
+ }
+ if err != nil {
+ fail(err)
+ }
+ return true
+}
+
+// d.prepare initializes and dereferences pointers and calls UnmarshalYAML
+// if a value is found to implement it.
+// It returns the initialized and dereferenced out value, whether
+// unmarshalling was already done by UnmarshalYAML, and if so whether
+// its types unmarshalled appropriately.
+//
+// If n holds a null value, prepare returns before doing anything.
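+//
+// A type opts in by implementing Unmarshaler; for example, a hypothetical
+// StringList (a []string) that accepts either a single scalar or a sequence:
+//
+//	func (s *StringList) UnmarshalYAML(unmarshal func(interface{}) error) error {
+//		var one string
+//		if err := unmarshal(&one); err == nil {
+//			*s = StringList{one}
+//			return nil
+//		}
+//		var many []string
+//		if err := unmarshal(&many); err != nil {
+//			return err
+//		}
+//		*s = StringList(many)
+//		return nil
+//	}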
+func (d *decoder) prepare(n *node, out reflect.Value) (newout reflect.Value, unmarshaled, good bool) {
+ if n.tag == yaml_NULL_TAG || n.kind == scalarNode && n.tag == "" && (n.value == "null" || n.value == "~" || n.value == "" && n.implicit) {
+ return out, false, false
+ }
+ again := true
+ for again {
+ again = false
+ if out.Kind() == reflect.Ptr {
+ if out.IsNil() {
+ out.Set(reflect.New(out.Type().Elem()))
+ }
+ out = out.Elem()
+ again = true
+ }
+ if out.CanAddr() {
+ if u, ok := out.Addr().Interface().(Unmarshaler); ok {
+ good = d.callUnmarshaler(n, u)
+ return out, true, good
+ }
+ }
+ }
+ return out, false, false
+}
+
+const (
+ // 400,000 decode operations is ~500kb of dense object declarations, or
+ // ~5kb of dense object declarations with 10000% alias expansion
+ alias_ratio_range_low = 400000
+
+ // 4,000,000 decode operations is ~5MB of dense object declarations, or
+ // ~4.5MB of dense object declarations with 10% alias expansion
+ alias_ratio_range_high = 4000000
+
+ // alias_ratio_range is the range over which we scale allowed alias ratios
+ alias_ratio_range = float64(alias_ratio_range_high - alias_ratio_range_low)
+)
+
+func allowedAliasRatio(decodeCount int) float64 {
+ switch {
+ case decodeCount <= alias_ratio_range_low:
+ // allow 99% to come from alias expansion for small-to-medium documents
+ return 0.99
+ case decodeCount >= alias_ratio_range_high:
+ // allow 10% to come from alias expansion for very large documents
+ return 0.10
+ default:
+ // scale smoothly from 99% down to 10% over the range.
+ // this maps to 396,000 - 400,000 allowed alias-driven decodes over the range.
+ // 400,000 decode operations is ~100MB of allocations in worst-case scenarios (single-item maps).
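+		// For example, at decodeCount = 2,200,000 (halfway through the
+		// range) the allowed ratio is 0.99 - 0.89*0.5 = 0.545.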
+ return 0.99 - 0.89*(float64(decodeCount-alias_ratio_range_low)/alias_ratio_range)
+ }
+}
+
+func (d *decoder) unmarshal(n *node, out reflect.Value) (good bool) {
+ d.decodeCount++
+ if d.aliasDepth > 0 {
+ d.aliasCount++
+ }
+ if d.aliasCount > 100 && d.decodeCount > 1000 && float64(d.aliasCount)/float64(d.decodeCount) > allowedAliasRatio(d.decodeCount) {
+ failf("document contains excessive aliasing")
+ }
+ switch n.kind {
+ case documentNode:
+ return d.document(n, out)
+ case aliasNode:
+ return d.alias(n, out)
+ }
+ out, unmarshaled, good := d.prepare(n, out)
+ if unmarshaled {
+ return good
+ }
+ switch n.kind {
+ case scalarNode:
+ good = d.scalar(n, out)
+ case mappingNode:
+ good = d.mapping(n, out)
+ case sequenceNode:
+ good = d.sequence(n, out)
+ default:
+ panic("internal error: unknown node kind: " + strconv.Itoa(n.kind))
+ }
+ return good
+}
+
+func (d *decoder) document(n *node, out reflect.Value) (good bool) {
+ if len(n.children) == 1 {
+ d.doc = n
+ d.unmarshal(n.children[0], out)
+ return true
+ }
+ return false
+}
+
+func (d *decoder) alias(n *node, out reflect.Value) (good bool) {
+ if d.aliases[n] {
+ // TODO this could actually be allowed in some circumstances.
+ failf("anchor '%s' value contains itself", n.value)
+ }
+ d.aliases[n] = true
+ d.aliasDepth++
+ good = d.unmarshal(n.alias, out)
+ d.aliasDepth--
+ delete(d.aliases, n)
+ return good
+}
+
+var zeroValue reflect.Value
+
+func resetMap(out reflect.Value) {
+ for _, k := range out.MapKeys() {
+ out.SetMapIndex(k, zeroValue)
+ }
+}
+
+func (d *decoder) scalar(n *node, out reflect.Value) bool {
+ var tag string
+ var resolved interface{}
+ if n.tag == "" && !n.implicit {
+ tag = yaml_STR_TAG
+ resolved = n.value
+ } else {
+ tag, resolved = resolve(n.tag, n.value)
+ if tag == yaml_BINARY_TAG {
+ data, err := base64.StdEncoding.DecodeString(resolved.(string))
+ if err != nil {
+ failf("!!binary value contains invalid base64 data")
+ }
+ resolved = string(data)
+ }
+ }
+ if resolved == nil {
+ if out.Kind() == reflect.Map && !out.CanAddr() {
+ resetMap(out)
+ } else {
+ out.Set(reflect.Zero(out.Type()))
+ }
+ return true
+ }
+ if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() {
+ // We've resolved to exactly the type we want, so use that.
+ out.Set(resolvedv)
+ return true
+ }
+ // Perhaps we can use the value as a TextUnmarshaler to
+ // set its value.
+ if out.CanAddr() {
+ u, ok := out.Addr().Interface().(encoding.TextUnmarshaler)
+ if ok {
+ var text []byte
+ if tag == yaml_BINARY_TAG {
+ text = []byte(resolved.(string))
+ } else {
+ // We let any value be unmarshaled into TextUnmarshaler.
+ // That might be more lax than we'd like, but the
+				// TextUnmarshaler itself should reject any dubious values.
+ text = []byte(n.value)
+ }
+ err := u.UnmarshalText(text)
+ if err != nil {
+ fail(err)
+ }
+ return true
+ }
+ }
+ switch out.Kind() {
+ case reflect.String:
+ if tag == yaml_BINARY_TAG {
+ out.SetString(resolved.(string))
+ return true
+ }
+ if resolved != nil {
+ out.SetString(n.value)
+ return true
+ }
+ case reflect.Interface:
+ if resolved == nil {
+ out.Set(reflect.Zero(out.Type()))
+ } else if tag == yaml_TIMESTAMP_TAG {
+ // It looks like a timestamp but for backward compatibility
+ // reasons we set it as a string, so that code that unmarshals
+ // timestamp-like values into interface{} will continue to
+ // see a string and not a time.Time.
+ // TODO(v3) Drop this.
+ out.Set(reflect.ValueOf(n.value))
+ } else {
+ out.Set(reflect.ValueOf(resolved))
+ }
+ return true
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ switch resolved := resolved.(type) {
+ case int:
+ if !out.OverflowInt(int64(resolved)) {
+ out.SetInt(int64(resolved))
+ return true
+ }
+ case int64:
+ if !out.OverflowInt(resolved) {
+ out.SetInt(resolved)
+ return true
+ }
+ case uint64:
+ if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) {
+ out.SetInt(int64(resolved))
+ return true
+ }
+ case float64:
+ if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) {
+ out.SetInt(int64(resolved))
+ return true
+ }
+ case string:
+ if out.Type() == durationType {
+ d, err := time.ParseDuration(resolved)
+ if err == nil {
+ out.SetInt(int64(d))
+ return true
+ }
+ }
+ }
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ switch resolved := resolved.(type) {
+ case int:
+ if resolved >= 0 && !out.OverflowUint(uint64(resolved)) {
+ out.SetUint(uint64(resolved))
+ return true
+ }
+ case int64:
+ if resolved >= 0 && !out.OverflowUint(uint64(resolved)) {
+ out.SetUint(uint64(resolved))
+ return true
+ }
+		case uint64:
+			if !out.OverflowUint(resolved) {
+				out.SetUint(resolved)
+				return true
+			}
+ case float64:
+ if resolved <= math.MaxUint64 && !out.OverflowUint(uint64(resolved)) {
+ out.SetUint(uint64(resolved))
+ return true
+ }
+ }
+ case reflect.Bool:
+ switch resolved := resolved.(type) {
+ case bool:
+ out.SetBool(resolved)
+ return true
+ }
+ case reflect.Float32, reflect.Float64:
+ switch resolved := resolved.(type) {
+ case int:
+ out.SetFloat(float64(resolved))
+ return true
+ case int64:
+ out.SetFloat(float64(resolved))
+ return true
+ case uint64:
+ out.SetFloat(float64(resolved))
+ return true
+ case float64:
+ out.SetFloat(resolved)
+ return true
+ }
+ case reflect.Struct:
+ if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() {
+ out.Set(resolvedv)
+ return true
+ }
+ case reflect.Ptr:
+ if out.Type().Elem() == reflect.TypeOf(resolved) {
+			// TODO Does this make sense? When is out a Ptr except when decoding a nil value?
+ elem := reflect.New(out.Type().Elem())
+ elem.Elem().Set(reflect.ValueOf(resolved))
+ out.Set(elem)
+ return true
+ }
+ }
+ d.terror(n, tag, out)
+ return false
+}
+
+func settableValueOf(i interface{}) reflect.Value {
+ v := reflect.ValueOf(i)
+ sv := reflect.New(v.Type()).Elem()
+ sv.Set(v)
+ return sv
+}
+
+func (d *decoder) sequence(n *node, out reflect.Value) (good bool) {
+ l := len(n.children)
+
+ var iface reflect.Value
+ switch out.Kind() {
+ case reflect.Slice:
+ out.Set(reflect.MakeSlice(out.Type(), l, l))
+ case reflect.Array:
+ if l != out.Len() {
+ failf("invalid array: want %d elements but got %d", out.Len(), l)
+ }
+ case reflect.Interface:
+ // No type hints. Will have to use a generic sequence.
+ iface = out
+ out = settableValueOf(make([]interface{}, l))
+ default:
+ d.terror(n, yaml_SEQ_TAG, out)
+ return false
+ }
+ et := out.Type().Elem()
+
+ j := 0
+ for i := 0; i < l; i++ {
+ e := reflect.New(et).Elem()
+ if ok := d.unmarshal(n.children[i], e); ok {
+ out.Index(j).Set(e)
+ j++
+ }
+ }
+ if out.Kind() != reflect.Array {
+ out.Set(out.Slice(0, j))
+ }
+ if iface.IsValid() {
+ iface.Set(out)
+ }
+ return true
+}
+
+func (d *decoder) mapping(n *node, out reflect.Value) (good bool) {
+ switch out.Kind() {
+ case reflect.Struct:
+ return d.mappingStruct(n, out)
+ case reflect.Slice:
+ return d.mappingSlice(n, out)
+ case reflect.Map:
+ // okay
+ case reflect.Interface:
+ if d.mapType.Kind() == reflect.Map {
+ iface := out
+ out = reflect.MakeMap(d.mapType)
+ iface.Set(out)
+ } else {
+ slicev := reflect.New(d.mapType).Elem()
+ if !d.mappingSlice(n, slicev) {
+ return false
+ }
+ out.Set(slicev)
+ return true
+ }
+ default:
+ d.terror(n, yaml_MAP_TAG, out)
+ return false
+ }
+ outt := out.Type()
+ kt := outt.Key()
+ et := outt.Elem()
+
+ mapType := d.mapType
+ if outt.Key() == ifaceType && outt.Elem() == ifaceType {
+ d.mapType = outt
+ }
+
+ if out.IsNil() {
+ out.Set(reflect.MakeMap(outt))
+ }
+ l := len(n.children)
+ for i := 0; i < l; i += 2 {
+ if isMerge(n.children[i]) {
+ d.merge(n.children[i+1], out)
+ continue
+ }
+ k := reflect.New(kt).Elem()
+ if d.unmarshal(n.children[i], k) {
+ kkind := k.Kind()
+ if kkind == reflect.Interface {
+ kkind = k.Elem().Kind()
+ }
+ if kkind == reflect.Map || kkind == reflect.Slice {
+ failf("invalid map key: %#v", k.Interface())
+ }
+ e := reflect.New(et).Elem()
+ if d.unmarshal(n.children[i+1], e) {
+ d.setMapIndex(n.children[i+1], out, k, e)
+ }
+ }
+ }
+ d.mapType = mapType
+ return true
+}
+
+func (d *decoder) setMapIndex(n *node, out, k, v reflect.Value) {
+ if d.strict && out.MapIndex(k) != zeroValue {
+ d.terrors = append(d.terrors, fmt.Sprintf("line %d: key %#v already set in map", n.line+1, k.Interface()))
+ return
+ }
+ out.SetMapIndex(k, v)
+}
+
+func (d *decoder) mappingSlice(n *node, out reflect.Value) (good bool) {
+ outt := out.Type()
+ if outt.Elem() != mapItemType {
+ d.terror(n, yaml_MAP_TAG, out)
+ return false
+ }
+
+ mapType := d.mapType
+ d.mapType = outt
+
+ var slice []MapItem
+ var l = len(n.children)
+ for i := 0; i < l; i += 2 {
+ if isMerge(n.children[i]) {
+ d.merge(n.children[i+1], out)
+ continue
+ }
+ item := MapItem{}
+ k := reflect.ValueOf(&item.Key).Elem()
+ if d.unmarshal(n.children[i], k) {
+ v := reflect.ValueOf(&item.Value).Elem()
+ if d.unmarshal(n.children[i+1], v) {
+ slice = append(slice, item)
+ }
+ }
+ }
+ out.Set(reflect.ValueOf(slice))
+ d.mapType = mapType
+ return true
+}
+
+func (d *decoder) mappingStruct(n *node, out reflect.Value) (good bool) {
+ sinfo, err := getStructInfo(out.Type())
+ if err != nil {
+ panic(err)
+ }
+ name := settableValueOf("")
+ l := len(n.children)
+
+ var inlineMap reflect.Value
+ var elemType reflect.Type
+ if sinfo.InlineMap != -1 {
+ inlineMap = out.Field(sinfo.InlineMap)
+ inlineMap.Set(reflect.New(inlineMap.Type()).Elem())
+ elemType = inlineMap.Type().Elem()
+ }
+
+ var doneFields []bool
+ if d.strict {
+ doneFields = make([]bool, len(sinfo.FieldsList))
+ }
+ for i := 0; i < l; i += 2 {
+ ni := n.children[i]
+ if isMerge(ni) {
+ d.merge(n.children[i+1], out)
+ continue
+ }
+ if !d.unmarshal(ni, name) {
+ continue
+ }
+ if info, ok := sinfo.FieldsMap[name.String()]; ok {
+ if d.strict {
+ if doneFields[info.Id] {
+ d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s already set in type %s", ni.line+1, name.String(), out.Type()))
+ continue
+ }
+ doneFields[info.Id] = true
+ }
+ var field reflect.Value
+ if info.Inline == nil {
+ field = out.Field(info.Num)
+ } else {
+ field = out.FieldByIndex(info.Inline)
+ }
+ d.unmarshal(n.children[i+1], field)
+ } else if sinfo.InlineMap != -1 {
+ if inlineMap.IsNil() {
+ inlineMap.Set(reflect.MakeMap(inlineMap.Type()))
+ }
+ value := reflect.New(elemType).Elem()
+ d.unmarshal(n.children[i+1], value)
+ d.setMapIndex(n.children[i+1], inlineMap, name, value)
+ } else if d.strict {
+ d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s not found in type %s", ni.line+1, name.String(), out.Type()))
+ }
+ }
+ return true
+}
+
+func failWantMap() {
+ failf("map merge requires map or sequence of maps as the value")
+}
+
+func (d *decoder) merge(n *node, out reflect.Value) {
+ switch n.kind {
+ case mappingNode:
+ d.unmarshal(n, out)
+ case aliasNode:
+ if n.alias != nil && n.alias.kind != mappingNode {
+ failWantMap()
+ }
+ d.unmarshal(n, out)
+ case sequenceNode:
+ // Step backwards as earlier nodes take precedence.
+ for i := len(n.children) - 1; i >= 0; i-- {
+ ni := n.children[i]
+ if ni.kind == aliasNode {
+ if ni.alias != nil && ni.alias.kind != mappingNode {
+ failWantMap()
+ }
+ } else if ni.kind != mappingNode {
+ failWantMap()
+ }
+ d.unmarshal(ni, out)
+ }
+ default:
+ failWantMap()
+ }
+}
+
+func isMerge(n *node) bool {
+	return n.kind == scalarNode && n.value == "<<" && (n.implicit || n.tag == yaml_MERGE_TAG)
+}
diff --git a/vendor/gopkg.in/yaml.v2/emitterc.go b/vendor/gopkg.in/yaml.v2/emitterc.go
new file mode 100644
index 0000000000..a1c2cc5262
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/emitterc.go
@@ -0,0 +1,1685 @@
+package yaml
+
+import (
+ "bytes"
+ "fmt"
+)
+
+// Flush the buffer if needed.
+func flush(emitter *yaml_emitter_t) bool {
+ if emitter.buffer_pos+5 >= len(emitter.buffer) {
+ return yaml_emitter_flush(emitter)
+ }
+ return true
+}
+
+// Put a character to the output buffer.
+func put(emitter *yaml_emitter_t, value byte) bool {
+ if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) {
+ return false
+ }
+ emitter.buffer[emitter.buffer_pos] = value
+ emitter.buffer_pos++
+ emitter.column++
+ return true
+}
+
+// Put a line break to the output buffer.
+func put_break(emitter *yaml_emitter_t) bool {
+ if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) {
+ return false
+ }
+ switch emitter.line_break {
+ case yaml_CR_BREAK:
+ emitter.buffer[emitter.buffer_pos] = '\r'
+ emitter.buffer_pos += 1
+ case yaml_LN_BREAK:
+ emitter.buffer[emitter.buffer_pos] = '\n'
+ emitter.buffer_pos += 1
+ case yaml_CRLN_BREAK:
+ emitter.buffer[emitter.buffer_pos+0] = '\r'
+ emitter.buffer[emitter.buffer_pos+1] = '\n'
+ emitter.buffer_pos += 2
+ default:
+ panic("unknown line break setting")
+ }
+ emitter.column = 0
+ emitter.line++
+ return true
+}
+
+// Copy a character from a string into buffer.
+func write(emitter *yaml_emitter_t, s []byte, i *int) bool {
+ if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) {
+ return false
+ }
+ p := emitter.buffer_pos
+ w := width(s[*i])
+ switch w {
+ case 4:
+ emitter.buffer[p+3] = s[*i+3]
+ fallthrough
+ case 3:
+ emitter.buffer[p+2] = s[*i+2]
+ fallthrough
+ case 2:
+ emitter.buffer[p+1] = s[*i+1]
+ fallthrough
+ case 1:
+ emitter.buffer[p+0] = s[*i+0]
+ default:
+ panic("unknown character width")
+ }
+ emitter.column++
+ emitter.buffer_pos += w
+ *i += w
+ return true
+}
+
+// Write a whole string into buffer.
+func write_all(emitter *yaml_emitter_t, s []byte) bool {
+ for i := 0; i < len(s); {
+ if !write(emitter, s, &i) {
+ return false
+ }
+ }
+ return true
+}
+
+// Copy a line break character from a string into buffer.
+func write_break(emitter *yaml_emitter_t, s []byte, i *int) bool {
+ if s[*i] == '\n' {
+ if !put_break(emitter) {
+ return false
+ }
+ *i++
+ } else {
+ if !write(emitter, s, i) {
+ return false
+ }
+ emitter.column = 0
+ emitter.line++
+ }
+ return true
+}
+
+// Set an emitter error and return false.
+func yaml_emitter_set_emitter_error(emitter *yaml_emitter_t, problem string) bool {
+ emitter.error = yaml_EMITTER_ERROR
+ emitter.problem = problem
+ return false
+}
+
+// Emit an event.
+func yaml_emitter_emit(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ emitter.events = append(emitter.events, *event)
+ for !yaml_emitter_need_more_events(emitter) {
+ event := &emitter.events[emitter.events_head]
+ if !yaml_emitter_analyze_event(emitter, event) {
+ return false
+ }
+ if !yaml_emitter_state_machine(emitter, event) {
+ return false
+ }
+ yaml_event_delete(event)
+ emitter.events_head++
+ }
+ return true
+}
+
+// Check if we need to accumulate more events before emitting.
+//
+// We accumulate extra
+// - 1 event for DOCUMENT-START
+// - 2 events for SEQUENCE-START
+// - 3 events for MAPPING-START
+//
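+// For example, a SEQUENCE-START is held back until either two further
+// events follow it in the queue or its matching SEQUENCE-END is already
+// buffered, so checks such as "is this sequence empty?" can be answered
+// before the opening indicator is written.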
+func yaml_emitter_need_more_events(emitter *yaml_emitter_t) bool {
+ if emitter.events_head == len(emitter.events) {
+ return true
+ }
+ var accumulate int
+ switch emitter.events[emitter.events_head].typ {
+	case yaml_DOCUMENT_START_EVENT:
+		accumulate = 1
+	case yaml_SEQUENCE_START_EVENT:
+		accumulate = 2
+	case yaml_MAPPING_START_EVENT:
+		accumulate = 3
+	default:
+		return false
+ }
+ if len(emitter.events)-emitter.events_head > accumulate {
+ return false
+ }
+ var level int
+ for i := emitter.events_head; i < len(emitter.events); i++ {
+ switch emitter.events[i].typ {
+ case yaml_STREAM_START_EVENT, yaml_DOCUMENT_START_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT:
+ level++
+ case yaml_STREAM_END_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_END_EVENT, yaml_MAPPING_END_EVENT:
+ level--
+ }
+ if level == 0 {
+ return false
+ }
+ }
+ return true
+}
+
+// Append a directive to the directives stack.
+func yaml_emitter_append_tag_directive(emitter *yaml_emitter_t, value *yaml_tag_directive_t, allow_duplicates bool) bool {
+ for i := 0; i < len(emitter.tag_directives); i++ {
+ if bytes.Equal(value.handle, emitter.tag_directives[i].handle) {
+ if allow_duplicates {
+ return true
+ }
+ return yaml_emitter_set_emitter_error(emitter, "duplicate %TAG directive")
+ }
+ }
+
+ // [Go] Do we actually need to copy this given garbage collection
+ // and the lack of deallocating destructors?
+ tag_copy := yaml_tag_directive_t{
+ handle: make([]byte, len(value.handle)),
+ prefix: make([]byte, len(value.prefix)),
+ }
+ copy(tag_copy.handle, value.handle)
+ copy(tag_copy.prefix, value.prefix)
+ emitter.tag_directives = append(emitter.tag_directives, tag_copy)
+ return true
+}
+
+// Increase the indentation level.
+func yaml_emitter_increase_indent(emitter *yaml_emitter_t, flow, indentless bool) bool {
+ emitter.indents = append(emitter.indents, emitter.indent)
+ if emitter.indent < 0 {
+ if flow {
+ emitter.indent = emitter.best_indent
+ } else {
+ emitter.indent = 0
+ }
+ } else if !indentless {
+ emitter.indent += emitter.best_indent
+ }
+ return true
+}
+
+// State dispatcher.
+func yaml_emitter_state_machine(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ switch emitter.state {
+ default:
+ case yaml_EMIT_STREAM_START_STATE:
+ return yaml_emitter_emit_stream_start(emitter, event)
+
+ case yaml_EMIT_FIRST_DOCUMENT_START_STATE:
+ return yaml_emitter_emit_document_start(emitter, event, true)
+
+ case yaml_EMIT_DOCUMENT_START_STATE:
+ return yaml_emitter_emit_document_start(emitter, event, false)
+
+ case yaml_EMIT_DOCUMENT_CONTENT_STATE:
+ return yaml_emitter_emit_document_content(emitter, event)
+
+ case yaml_EMIT_DOCUMENT_END_STATE:
+ return yaml_emitter_emit_document_end(emitter, event)
+
+ case yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE:
+ return yaml_emitter_emit_flow_sequence_item(emitter, event, true)
+
+ case yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE:
+ return yaml_emitter_emit_flow_sequence_item(emitter, event, false)
+
+ case yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE:
+ return yaml_emitter_emit_flow_mapping_key(emitter, event, true)
+
+ case yaml_EMIT_FLOW_MAPPING_KEY_STATE:
+ return yaml_emitter_emit_flow_mapping_key(emitter, event, false)
+
+ case yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE:
+ return yaml_emitter_emit_flow_mapping_value(emitter, event, true)
+
+ case yaml_EMIT_FLOW_MAPPING_VALUE_STATE:
+ return yaml_emitter_emit_flow_mapping_value(emitter, event, false)
+
+ case yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE:
+ return yaml_emitter_emit_block_sequence_item(emitter, event, true)
+
+ case yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE:
+ return yaml_emitter_emit_block_sequence_item(emitter, event, false)
+
+ case yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE:
+ return yaml_emitter_emit_block_mapping_key(emitter, event, true)
+
+ case yaml_EMIT_BLOCK_MAPPING_KEY_STATE:
+ return yaml_emitter_emit_block_mapping_key(emitter, event, false)
+
+ case yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE:
+ return yaml_emitter_emit_block_mapping_value(emitter, event, true)
+
+ case yaml_EMIT_BLOCK_MAPPING_VALUE_STATE:
+ return yaml_emitter_emit_block_mapping_value(emitter, event, false)
+
+ case yaml_EMIT_END_STATE:
+ return yaml_emitter_set_emitter_error(emitter, "expected nothing after STREAM-END")
+ }
+ panic("invalid emitter state")
+}
+
+// Expect STREAM-START.
+func yaml_emitter_emit_stream_start(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ if event.typ != yaml_STREAM_START_EVENT {
+ return yaml_emitter_set_emitter_error(emitter, "expected STREAM-START")
+ }
+ if emitter.encoding == yaml_ANY_ENCODING {
+ emitter.encoding = event.encoding
+ if emitter.encoding == yaml_ANY_ENCODING {
+ emitter.encoding = yaml_UTF8_ENCODING
+ }
+ }
+ if emitter.best_indent < 2 || emitter.best_indent > 9 {
+ emitter.best_indent = 2
+ }
+ if emitter.best_width >= 0 && emitter.best_width <= emitter.best_indent*2 {
+ emitter.best_width = 80
+ }
+ if emitter.best_width < 0 {
+ emitter.best_width = 1<<31 - 1
+ }
+ if emitter.line_break == yaml_ANY_BREAK {
+ emitter.line_break = yaml_LN_BREAK
+ }
+
+ emitter.indent = -1
+ emitter.line = 0
+ emitter.column = 0
+ emitter.whitespace = true
+ emitter.indention = true
+
+ if emitter.encoding != yaml_UTF8_ENCODING {
+ if !yaml_emitter_write_bom(emitter) {
+ return false
+ }
+ }
+ emitter.state = yaml_EMIT_FIRST_DOCUMENT_START_STATE
+ return true
+}
+
+// Expect DOCUMENT-START or STREAM-END.
+func yaml_emitter_emit_document_start(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
+
+ if event.typ == yaml_DOCUMENT_START_EVENT {
+
+ if event.version_directive != nil {
+ if !yaml_emitter_analyze_version_directive(emitter, event.version_directive) {
+ return false
+ }
+ }
+
+ for i := 0; i < len(event.tag_directives); i++ {
+ tag_directive := &event.tag_directives[i]
+ if !yaml_emitter_analyze_tag_directive(emitter, tag_directive) {
+ return false
+ }
+ if !yaml_emitter_append_tag_directive(emitter, tag_directive, false) {
+ return false
+ }
+ }
+
+ for i := 0; i < len(default_tag_directives); i++ {
+ tag_directive := &default_tag_directives[i]
+ if !yaml_emitter_append_tag_directive(emitter, tag_directive, true) {
+ return false
+ }
+ }
+
+ implicit := event.implicit
+ if !first || emitter.canonical {
+ implicit = false
+ }
+
+ if emitter.open_ended && (event.version_directive != nil || len(event.tag_directives) > 0) {
+ if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+
+ if event.version_directive != nil {
+ implicit = false
+ if !yaml_emitter_write_indicator(emitter, []byte("%YAML"), true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte("1.1"), true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+
+ if len(event.tag_directives) > 0 {
+ implicit = false
+ for i := 0; i < len(event.tag_directives); i++ {
+ tag_directive := &event.tag_directives[i]
+ if !yaml_emitter_write_indicator(emitter, []byte("%TAG"), true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_tag_handle(emitter, tag_directive.handle) {
+ return false
+ }
+ if !yaml_emitter_write_tag_content(emitter, tag_directive.prefix, true) {
+ return false
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ }
+
+ if yaml_emitter_check_empty_document(emitter) {
+ implicit = false
+ }
+ if !implicit {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte("---"), true, false, false) {
+ return false
+ }
+ if emitter.canonical {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ }
+
+ emitter.state = yaml_EMIT_DOCUMENT_CONTENT_STATE
+ return true
+ }
+
+ if event.typ == yaml_STREAM_END_EVENT {
+ if emitter.open_ended {
+ if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if !yaml_emitter_flush(emitter) {
+ return false
+ }
+ emitter.state = yaml_EMIT_END_STATE
+ return true
+ }
+
+ return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-START or STREAM-END")
+}
+
+// Expect the root node.
+func yaml_emitter_emit_document_content(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ emitter.states = append(emitter.states, yaml_EMIT_DOCUMENT_END_STATE)
+ return yaml_emitter_emit_node(emitter, event, true, false, false, false)
+}
+
+// Expect DOCUMENT-END.
+func yaml_emitter_emit_document_end(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ if event.typ != yaml_DOCUMENT_END_EVENT {
+ return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-END")
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if !event.implicit {
+ // [Go] Allocate the slice elsewhere.
+ if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if !yaml_emitter_flush(emitter) {
+ return false
+ }
+ emitter.state = yaml_EMIT_DOCUMENT_START_STATE
+ emitter.tag_directives = emitter.tag_directives[:0]
+ return true
+}
+
+// Expect a flow item node.
+func yaml_emitter_emit_flow_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
+ if first {
+ if !yaml_emitter_write_indicator(emitter, []byte{'['}, true, true, false) {
+ return false
+ }
+ if !yaml_emitter_increase_indent(emitter, true, false) {
+ return false
+ }
+ emitter.flow_level++
+ }
+
+ if event.typ == yaml_SEQUENCE_END_EVENT {
+ emitter.flow_level--
+ emitter.indent = emitter.indents[len(emitter.indents)-1]
+ emitter.indents = emitter.indents[:len(emitter.indents)-1]
+ if emitter.canonical && !first {
+ if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{']'}, false, false, false) {
+ return false
+ }
+ emitter.state = emitter.states[len(emitter.states)-1]
+ emitter.states = emitter.states[:len(emitter.states)-1]
+
+ return true
+ }
+
+ if !first {
+ if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
+ return false
+ }
+ }
+
+ if emitter.canonical || emitter.column > emitter.best_width {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE)
+ return yaml_emitter_emit_node(emitter, event, false, true, false, false)
+}
+
+// Expect a flow key node.
+func yaml_emitter_emit_flow_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
+ if first {
+ if !yaml_emitter_write_indicator(emitter, []byte{'{'}, true, true, false) {
+ return false
+ }
+ if !yaml_emitter_increase_indent(emitter, true, false) {
+ return false
+ }
+ emitter.flow_level++
+ }
+
+ if event.typ == yaml_MAPPING_END_EVENT {
+ emitter.flow_level--
+ emitter.indent = emitter.indents[len(emitter.indents)-1]
+ emitter.indents = emitter.indents[:len(emitter.indents)-1]
+ if emitter.canonical && !first {
+ if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{'}'}, false, false, false) {
+ return false
+ }
+ emitter.state = emitter.states[len(emitter.states)-1]
+ emitter.states = emitter.states[:len(emitter.states)-1]
+ return true
+ }
+
+ if !first {
+ if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
+ return false
+ }
+ }
+ if emitter.canonical || emitter.column > emitter.best_width {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+
+ if !emitter.canonical && yaml_emitter_check_simple_key(emitter) {
+ emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE)
+ return yaml_emitter_emit_node(emitter, event, false, false, true, true)
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, false) {
+ return false
+ }
+ emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_VALUE_STATE)
+ return yaml_emitter_emit_node(emitter, event, false, false, true, false)
+}
+
+// Expect a flow value node.
+func yaml_emitter_emit_flow_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool {
+ if simple {
+ if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) {
+ return false
+ }
+ } else {
+ if emitter.canonical || emitter.column > emitter.best_width {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, false) {
+ return false
+ }
+ }
+ emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_KEY_STATE)
+ return yaml_emitter_emit_node(emitter, event, false, false, true, false)
+}
+
+// Expect a block item node.
+func yaml_emitter_emit_block_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
+ if first {
+ if !yaml_emitter_increase_indent(emitter, false, emitter.mapping_context && !emitter.indention) {
+ return false
+ }
+ }
+ if event.typ == yaml_SEQUENCE_END_EVENT {
+ emitter.indent = emitter.indents[len(emitter.indents)-1]
+ emitter.indents = emitter.indents[:len(emitter.indents)-1]
+ emitter.state = emitter.states[len(emitter.states)-1]
+ emitter.states = emitter.states[:len(emitter.states)-1]
+ return true
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{'-'}, true, false, true) {
+ return false
+ }
+ emitter.states = append(emitter.states, yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE)
+ return yaml_emitter_emit_node(emitter, event, false, true, false, false)
+}
+
+// Expect a block key node.
+func yaml_emitter_emit_block_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
+ if first {
+ if !yaml_emitter_increase_indent(emitter, false, false) {
+ return false
+ }
+ }
+ if event.typ == yaml_MAPPING_END_EVENT {
+ emitter.indent = emitter.indents[len(emitter.indents)-1]
+ emitter.indents = emitter.indents[:len(emitter.indents)-1]
+ emitter.state = emitter.states[len(emitter.states)-1]
+ emitter.states = emitter.states[:len(emitter.states)-1]
+ return true
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if yaml_emitter_check_simple_key(emitter) {
+ emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE)
+ return yaml_emitter_emit_node(emitter, event, false, false, true, true)
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, true) {
+ return false
+ }
+ emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_VALUE_STATE)
+ return yaml_emitter_emit_node(emitter, event, false, false, true, false)
+}
+
+// Expect a block value node.
+func yaml_emitter_emit_block_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool {
+ if simple {
+ if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) {
+ return false
+ }
+ } else {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, true) {
+ return false
+ }
+ }
+ emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_KEY_STATE)
+ return yaml_emitter_emit_node(emitter, event, false, false, true, false)
+}
+
+// Expect a node.
+func yaml_emitter_emit_node(emitter *yaml_emitter_t, event *yaml_event_t,
+ root bool, sequence bool, mapping bool, simple_key bool) bool {
+
+ emitter.root_context = root
+ emitter.sequence_context = sequence
+ emitter.mapping_context = mapping
+ emitter.simple_key_context = simple_key
+
+ switch event.typ {
+ case yaml_ALIAS_EVENT:
+ return yaml_emitter_emit_alias(emitter, event)
+ case yaml_SCALAR_EVENT:
+ return yaml_emitter_emit_scalar(emitter, event)
+ case yaml_SEQUENCE_START_EVENT:
+ return yaml_emitter_emit_sequence_start(emitter, event)
+ case yaml_MAPPING_START_EVENT:
+ return yaml_emitter_emit_mapping_start(emitter, event)
+ default:
+ return yaml_emitter_set_emitter_error(emitter,
+ fmt.Sprintf("expected SCALAR, SEQUENCE-START, MAPPING-START, or ALIAS, but got %v", event.typ))
+ }
+}
+
+// Expect ALIAS.
+func yaml_emitter_emit_alias(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ if !yaml_emitter_process_anchor(emitter) {
+ return false
+ }
+ emitter.state = emitter.states[len(emitter.states)-1]
+ emitter.states = emitter.states[:len(emitter.states)-1]
+ return true
+}
+
+// Expect SCALAR.
+func yaml_emitter_emit_scalar(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ if !yaml_emitter_select_scalar_style(emitter, event) {
+ return false
+ }
+ if !yaml_emitter_process_anchor(emitter) {
+ return false
+ }
+ if !yaml_emitter_process_tag(emitter) {
+ return false
+ }
+ if !yaml_emitter_increase_indent(emitter, true, false) {
+ return false
+ }
+ if !yaml_emitter_process_scalar(emitter) {
+ return false
+ }
+ emitter.indent = emitter.indents[len(emitter.indents)-1]
+ emitter.indents = emitter.indents[:len(emitter.indents)-1]
+ emitter.state = emitter.states[len(emitter.states)-1]
+ emitter.states = emitter.states[:len(emitter.states)-1]
+ return true
+}
+
+// Expect SEQUENCE-START.
+func yaml_emitter_emit_sequence_start(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ if !yaml_emitter_process_anchor(emitter) {
+ return false
+ }
+ if !yaml_emitter_process_tag(emitter) {
+ return false
+ }
+ if emitter.flow_level > 0 || emitter.canonical || event.sequence_style() == yaml_FLOW_SEQUENCE_STYLE ||
+ yaml_emitter_check_empty_sequence(emitter) {
+ emitter.state = yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE
+ } else {
+ emitter.state = yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE
+ }
+ return true
+}
+
+// Expect MAPPING-START.
+func yaml_emitter_emit_mapping_start(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ if !yaml_emitter_process_anchor(emitter) {
+ return false
+ }
+ if !yaml_emitter_process_tag(emitter) {
+ return false
+ }
+ if emitter.flow_level > 0 || emitter.canonical || event.mapping_style() == yaml_FLOW_MAPPING_STYLE ||
+ yaml_emitter_check_empty_mapping(emitter) {
+ emitter.state = yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE
+ } else {
+ emitter.state = yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE
+ }
+ return true
+}
+
+// Check if the document content is an empty scalar.
+func yaml_emitter_check_empty_document(emitter *yaml_emitter_t) bool {
+ return false // [Go] Huh?
+}
+
+// Check if the next events represent an empty sequence.
+func yaml_emitter_check_empty_sequence(emitter *yaml_emitter_t) bool {
+ if len(emitter.events)-emitter.events_head < 2 {
+ return false
+ }
+ return emitter.events[emitter.events_head].typ == yaml_SEQUENCE_START_EVENT &&
+ emitter.events[emitter.events_head+1].typ == yaml_SEQUENCE_END_EVENT
+}
+
+// Check if the next events represent an empty mapping.
+func yaml_emitter_check_empty_mapping(emitter *yaml_emitter_t) bool {
+ if len(emitter.events)-emitter.events_head < 2 {
+ return false
+ }
+ return emitter.events[emitter.events_head].typ == yaml_MAPPING_START_EVENT &&
+ emitter.events[emitter.events_head+1].typ == yaml_MAPPING_END_EVENT
+}
+
+// Check if the next node can be expressed as a simple key.
+func yaml_emitter_check_simple_key(emitter *yaml_emitter_t) bool {
+ length := 0
+ switch emitter.events[emitter.events_head].typ {
+ case yaml_ALIAS_EVENT:
+ length += len(emitter.anchor_data.anchor)
+ case yaml_SCALAR_EVENT:
+ if emitter.scalar_data.multiline {
+ return false
+ }
+ length += len(emitter.anchor_data.anchor) +
+ len(emitter.tag_data.handle) +
+ len(emitter.tag_data.suffix) +
+ len(emitter.scalar_data.value)
+ case yaml_SEQUENCE_START_EVENT:
+ if !yaml_emitter_check_empty_sequence(emitter) {
+ return false
+ }
+ length += len(emitter.anchor_data.anchor) +
+ len(emitter.tag_data.handle) +
+ len(emitter.tag_data.suffix)
+ case yaml_MAPPING_START_EVENT:
+ if !yaml_emitter_check_empty_mapping(emitter) {
+ return false
+ }
+ length += len(emitter.anchor_data.anchor) +
+ len(emitter.tag_data.handle) +
+ len(emitter.tag_data.suffix)
+ default:
+ return false
+ }
+ return length <= 128
+}
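+
+// For illustration (assumed output given the 128-byte cap and the multiline
+// check above): a short key is emitted inline, while an overlong or
+// multiline key falls back to the explicit form:
+//
+//	short: value
+//	? |-
+//	  a multiline
+//	  key
+//	: value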
+
+// Determine an acceptable scalar style.
+func yaml_emitter_select_scalar_style(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+
+ no_tag := len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0
+ if no_tag && !event.implicit && !event.quoted_implicit {
+ return yaml_emitter_set_emitter_error(emitter, "neither tag nor implicit flags are specified")
+ }
+
+ style := event.scalar_style()
+ if style == yaml_ANY_SCALAR_STYLE {
+ style = yaml_PLAIN_SCALAR_STYLE
+ }
+ if emitter.canonical {
+ style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ }
+ if emitter.simple_key_context && emitter.scalar_data.multiline {
+ style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ }
+
+ if style == yaml_PLAIN_SCALAR_STYLE {
+ if emitter.flow_level > 0 && !emitter.scalar_data.flow_plain_allowed ||
+ emitter.flow_level == 0 && !emitter.scalar_data.block_plain_allowed {
+ style = yaml_SINGLE_QUOTED_SCALAR_STYLE
+ }
+ if len(emitter.scalar_data.value) == 0 && (emitter.flow_level > 0 || emitter.simple_key_context) {
+ style = yaml_SINGLE_QUOTED_SCALAR_STYLE
+ }
+ if no_tag && !event.implicit {
+ style = yaml_SINGLE_QUOTED_SCALAR_STYLE
+ }
+ }
+ if style == yaml_SINGLE_QUOTED_SCALAR_STYLE {
+ if !emitter.scalar_data.single_quoted_allowed {
+ style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ }
+ }
+ if style == yaml_LITERAL_SCALAR_STYLE || style == yaml_FOLDED_SCALAR_STYLE {
+ if !emitter.scalar_data.block_allowed || emitter.flow_level > 0 || emitter.simple_key_context {
+ style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ }
+ }
+
+ if no_tag && !event.quoted_implicit && style != yaml_PLAIN_SCALAR_STYLE {
+ emitter.tag_data.handle = []byte{'!'}
+ }
+ emitter.scalar_data.style = style
+ return true
+}
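+
+// For illustration (assumed inputs on a default emitter, per the cascade
+// above):
+//
+//	"plain text"     -> plain (implicit tag, no indicators)
+//	""               -> single-quoted in flow or simple-key context
+//	"two\nlines"     -> double-quoted when used as a simple key
+//	canonical mode   -> double-quoted regardless of requested style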
+
+// Write an anchor.
+func yaml_emitter_process_anchor(emitter *yaml_emitter_t) bool {
+ if emitter.anchor_data.anchor == nil {
+ return true
+ }
+ c := []byte{'&'}
+ if emitter.anchor_data.alias {
+ c[0] = '*'
+ }
+ if !yaml_emitter_write_indicator(emitter, c, true, false, false) {
+ return false
+ }
+ return yaml_emitter_write_anchor(emitter, emitter.anchor_data.anchor)
+}
+
+// Write a tag.
+func yaml_emitter_process_tag(emitter *yaml_emitter_t) bool {
+ if len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 {
+ return true
+ }
+ if len(emitter.tag_data.handle) > 0 {
+ if !yaml_emitter_write_tag_handle(emitter, emitter.tag_data.handle) {
+ return false
+ }
+ if len(emitter.tag_data.suffix) > 0 {
+ if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) {
+ return false
+ }
+ }
+ } else {
+ // [Go] Allocate these slices elsewhere.
+ if !yaml_emitter_write_indicator(emitter, []byte("!<"), true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) {
+ return false
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{'>'}, false, false, false) {
+ return false
+ }
+ }
+ return true
+}
+
+// Write a scalar.
+func yaml_emitter_process_scalar(emitter *yaml_emitter_t) bool {
+ switch emitter.scalar_data.style {
+ case yaml_PLAIN_SCALAR_STYLE:
+ return yaml_emitter_write_plain_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context)
+
+ case yaml_SINGLE_QUOTED_SCALAR_STYLE:
+ return yaml_emitter_write_single_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context)
+
+ case yaml_DOUBLE_QUOTED_SCALAR_STYLE:
+ return yaml_emitter_write_double_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context)
+
+ case yaml_LITERAL_SCALAR_STYLE:
+ return yaml_emitter_write_literal_scalar(emitter, emitter.scalar_data.value)
+
+ case yaml_FOLDED_SCALAR_STYLE:
+ return yaml_emitter_write_folded_scalar(emitter, emitter.scalar_data.value)
+ }
+ panic("unknown scalar style")
+}
+
+// Check if a %YAML directive is valid.
+func yaml_emitter_analyze_version_directive(emitter *yaml_emitter_t, version_directive *yaml_version_directive_t) bool {
+ if version_directive.major != 1 || version_directive.minor != 1 {
+ return yaml_emitter_set_emitter_error(emitter, "incompatible %YAML directive")
+ }
+ return true
+}
+
+// Check if a %TAG directive is valid.
+func yaml_emitter_analyze_tag_directive(emitter *yaml_emitter_t, tag_directive *yaml_tag_directive_t) bool {
+ handle := tag_directive.handle
+ prefix := tag_directive.prefix
+ if len(handle) == 0 {
+ return yaml_emitter_set_emitter_error(emitter, "tag handle must not be empty")
+ }
+ if handle[0] != '!' {
+ return yaml_emitter_set_emitter_error(emitter, "tag handle must start with '!'")
+ }
+ if handle[len(handle)-1] != '!' {
+ return yaml_emitter_set_emitter_error(emitter, "tag handle must end with '!'")
+ }
+ for i := 1; i < len(handle)-1; i += width(handle[i]) {
+ if !is_alpha(handle, i) {
+ return yaml_emitter_set_emitter_error(emitter, "tag handle must contain alphanumerical characters only")
+ }
+ }
+ if len(prefix) == 0 {
+ return yaml_emitter_set_emitter_error(emitter, "tag prefix must not be empty")
+ }
+ return true
+}
+
+// Check if an anchor is valid.
+func yaml_emitter_analyze_anchor(emitter *yaml_emitter_t, anchor []byte, alias bool) bool {
+ if len(anchor) == 0 {
+ problem := "anchor value must not be empty"
+ if alias {
+ problem = "alias value must not be empty"
+ }
+ return yaml_emitter_set_emitter_error(emitter, problem)
+ }
+ for i := 0; i < len(anchor); i += width(anchor[i]) {
+ if !is_alpha(anchor, i) {
+ problem := "anchor value must contain alphanumerical characters only"
+ if alias {
+ problem = "alias value must contain alphanumerical characters only"
+ }
+ return yaml_emitter_set_emitter_error(emitter, problem)
+ }
+ }
+ emitter.anchor_data.anchor = anchor
+ emitter.anchor_data.alias = alias
+ return true
+}
+
+// Check if a tag is valid.
+func yaml_emitter_analyze_tag(emitter *yaml_emitter_t, tag []byte) bool {
+ if len(tag) == 0 {
+ return yaml_emitter_set_emitter_error(emitter, "tag value must not be empty")
+ }
+ for i := 0; i < len(emitter.tag_directives); i++ {
+ tag_directive := &emitter.tag_directives[i]
+ if bytes.HasPrefix(tag, tag_directive.prefix) {
+ emitter.tag_data.handle = tag_directive.handle
+ emitter.tag_data.suffix = tag[len(tag_directive.prefix):]
+ return true
+ }
+ }
+ emitter.tag_data.suffix = tag
+ return true
+}
+
+// Check if a scalar is valid.
+func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool {
+ var (
+ block_indicators = false
+ flow_indicators = false
+ line_breaks = false
+ special_characters = false
+
+ leading_space = false
+ leading_break = false
+ trailing_space = false
+ trailing_break = false
+ break_space = false
+ space_break = false
+
+ preceded_by_whitespace = false
+ followed_by_whitespace = false
+ previous_space = false
+ previous_break = false
+ )
+
+ emitter.scalar_data.value = value
+
+ if len(value) == 0 {
+ emitter.scalar_data.multiline = false
+ emitter.scalar_data.flow_plain_allowed = false
+ emitter.scalar_data.block_plain_allowed = true
+ emitter.scalar_data.single_quoted_allowed = true
+ emitter.scalar_data.block_allowed = false
+ return true
+ }
+
+ if len(value) >= 3 && ((value[0] == '-' && value[1] == '-' && value[2] == '-') || (value[0] == '.' && value[1] == '.' && value[2] == '.')) {
+ block_indicators = true
+ flow_indicators = true
+ }
+
+ preceded_by_whitespace = true
+ for i, w := 0, 0; i < len(value); i += w {
+ w = width(value[i])
+ followed_by_whitespace = i+w >= len(value) || is_blank(value, i+w)
+
+ if i == 0 {
+ switch value[i] {
+ case '#', ',', '[', ']', '{', '}', '&', '*', '!', '|', '>', '\'', '"', '%', '@', '`':
+ flow_indicators = true
+ block_indicators = true
+ case '?', ':':
+ flow_indicators = true
+ if followed_by_whitespace {
+ block_indicators = true
+ }
+ case '-':
+ if followed_by_whitespace {
+ flow_indicators = true
+ block_indicators = true
+ }
+ }
+ } else {
+ switch value[i] {
+ case ',', '?', '[', ']', '{', '}':
+ flow_indicators = true
+ case ':':
+ flow_indicators = true
+ if followed_by_whitespace {
+ block_indicators = true
+ }
+ case '#':
+ if preceded_by_whitespace {
+ flow_indicators = true
+ block_indicators = true
+ }
+ }
+ }
+
+ if !is_printable(value, i) || !is_ascii(value, i) && !emitter.unicode {
+ special_characters = true
+ }
+ if is_space(value, i) {
+ if i == 0 {
+ leading_space = true
+ }
+ if i+width(value[i]) == len(value) {
+ trailing_space = true
+ }
+ if previous_break {
+ break_space = true
+ }
+ previous_space = true
+ previous_break = false
+ } else if is_break(value, i) {
+ line_breaks = true
+ if i == 0 {
+ leading_break = true
+ }
+ if i+width(value[i]) == len(value) {
+ trailing_break = true
+ }
+ if previous_space {
+ space_break = true
+ }
+ previous_space = false
+ previous_break = true
+ } else {
+ previous_space = false
+ previous_break = false
+ }
+
+ // [Go]: Why 'z'? Couldn't be the end of the string as that's the loop condition.
+ preceded_by_whitespace = is_blankz(value, i)
+ }
+
+ emitter.scalar_data.multiline = line_breaks
+ emitter.scalar_data.flow_plain_allowed = true
+ emitter.scalar_data.block_plain_allowed = true
+ emitter.scalar_data.single_quoted_allowed = true
+ emitter.scalar_data.block_allowed = true
+
+ if leading_space || leading_break || trailing_space || trailing_break {
+ emitter.scalar_data.flow_plain_allowed = false
+ emitter.scalar_data.block_plain_allowed = false
+ }
+ if trailing_space {
+ emitter.scalar_data.block_allowed = false
+ }
+ if break_space {
+ emitter.scalar_data.flow_plain_allowed = false
+ emitter.scalar_data.block_plain_allowed = false
+ emitter.scalar_data.single_quoted_allowed = false
+ }
+ if space_break || special_characters {
+ emitter.scalar_data.flow_plain_allowed = false
+ emitter.scalar_data.block_plain_allowed = false
+ emitter.scalar_data.single_quoted_allowed = false
+ emitter.scalar_data.block_allowed = false
+ }
+ if line_breaks {
+ emitter.scalar_data.flow_plain_allowed = false
+ emitter.scalar_data.block_plain_allowed = false
+ }
+ if flow_indicators {
+ emitter.scalar_data.flow_plain_allowed = false
+ }
+ if block_indicators {
+ emitter.scalar_data.block_plain_allowed = false
+ }
+ return true
+}
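+
+// For illustration (assumed classification given the flags computed above):
+//
+//	"plain"      -> every style allowed
+//	" padded"    -> plain styles disallowed (leading space)
+//	"padded "    -> plain and block styles disallowed (trailing space)
+//	"two\nlines" -> plain styles disallowed; line breaks force quoting
+//	                or a block style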
+
+// Check if the event data is valid.
+func yaml_emitter_analyze_event(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+
+ emitter.anchor_data.anchor = nil
+ emitter.tag_data.handle = nil
+ emitter.tag_data.suffix = nil
+ emitter.scalar_data.value = nil
+
+ switch event.typ {
+ case yaml_ALIAS_EVENT:
+ if !yaml_emitter_analyze_anchor(emitter, event.anchor, true) {
+ return false
+ }
+
+ case yaml_SCALAR_EVENT:
+ if len(event.anchor) > 0 {
+ if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) {
+ return false
+ }
+ }
+ if len(event.tag) > 0 && (emitter.canonical || (!event.implicit && !event.quoted_implicit)) {
+ if !yaml_emitter_analyze_tag(emitter, event.tag) {
+ return false
+ }
+ }
+ if !yaml_emitter_analyze_scalar(emitter, event.value) {
+ return false
+ }
+
+ case yaml_SEQUENCE_START_EVENT:
+ if len(event.anchor) > 0 {
+ if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) {
+ return false
+ }
+ }
+ if len(event.tag) > 0 && (emitter.canonical || !event.implicit) {
+ if !yaml_emitter_analyze_tag(emitter, event.tag) {
+ return false
+ }
+ }
+
+ case yaml_MAPPING_START_EVENT:
+ if len(event.anchor) > 0 {
+ if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) {
+ return false
+ }
+ }
+ if len(event.tag) > 0 && (emitter.canonical || !event.implicit) {
+ if !yaml_emitter_analyze_tag(emitter, event.tag) {
+ return false
+ }
+ }
+ }
+ return true
+}
+
+// Write the BOM character.
+func yaml_emitter_write_bom(emitter *yaml_emitter_t) bool {
+ if !flush(emitter) {
+ return false
+ }
+ pos := emitter.buffer_pos
+ emitter.buffer[pos+0] = '\xEF'
+ emitter.buffer[pos+1] = '\xBB'
+ emitter.buffer[pos+2] = '\xBF'
+ emitter.buffer_pos += 3
+ return true
+}
+
+func yaml_emitter_write_indent(emitter *yaml_emitter_t) bool {
+ indent := emitter.indent
+ if indent < 0 {
+ indent = 0
+ }
+ if !emitter.indention || emitter.column > indent || (emitter.column == indent && !emitter.whitespace) {
+ if !put_break(emitter) {
+ return false
+ }
+ }
+ for emitter.column < indent {
+ if !put(emitter, ' ') {
+ return false
+ }
+ }
+ emitter.whitespace = true
+ emitter.indention = true
+ return true
+}
+
+func yaml_emitter_write_indicator(emitter *yaml_emitter_t, indicator []byte, need_whitespace, is_whitespace, is_indention bool) bool {
+ if need_whitespace && !emitter.whitespace {
+ if !put(emitter, ' ') {
+ return false
+ }
+ }
+ if !write_all(emitter, indicator) {
+ return false
+ }
+ emitter.whitespace = is_whitespace
+ emitter.indention = (emitter.indention && is_indention)
+ emitter.open_ended = false
+ return true
+}
+
+func yaml_emitter_write_anchor(emitter *yaml_emitter_t, value []byte) bool {
+ if !write_all(emitter, value) {
+ return false
+ }
+ emitter.whitespace = false
+ emitter.indention = false
+ return true
+}
+
+func yaml_emitter_write_tag_handle(emitter *yaml_emitter_t, value []byte) bool {
+ if !emitter.whitespace {
+ if !put(emitter, ' ') {
+ return false
+ }
+ }
+ if !write_all(emitter, value) {
+ return false
+ }
+ emitter.whitespace = false
+ emitter.indention = false
+ return true
+}
+
+func yaml_emitter_write_tag_content(emitter *yaml_emitter_t, value []byte, need_whitespace bool) bool {
+ if need_whitespace && !emitter.whitespace {
+ if !put(emitter, ' ') {
+ return false
+ }
+ }
+ for i := 0; i < len(value); {
+ var must_write bool
+ switch value[i] {
+ case ';', '/', '?', ':', '@', '&', '=', '+', '$', ',', '_', '.', '~', '*', '\'', '(', ')', '[', ']':
+ must_write = true
+ default:
+ must_write = is_alpha(value, i)
+ }
+ if must_write {
+ if !write(emitter, value, &i) {
+ return false
+ }
+ } else {
+ w := width(value[i])
+ for k := 0; k < w; k++ {
+ octet := value[i]
+ i++
+ if !put(emitter, '%') {
+ return false
+ }
+
+ c := octet >> 4
+ if c < 10 {
+ c += '0'
+ } else {
+ c += 'A' - 10
+ }
+ if !put(emitter, c) {
+ return false
+ }
+
+ c = octet & 0x0f
+ if c < 10 {
+ c += '0'
+ } else {
+ c += 'A' - 10
+ }
+ if !put(emitter, c) {
+ return false
+ }
+ }
+ }
+ }
+ emitter.whitespace = false
+ emitter.indention = false
+ return true
+}
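+
+// For illustration (assumed output of the percent-encoding loop above):
+// bytes outside the URI-safe set are escaped per octet, so a tag suffix of
+// "hello world" is written as "hello%20world", and the two-byte UTF-8
+// encoding of 'é' (0xC3 0xA9) becomes "%C3%A9".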
+
+func yaml_emitter_write_plain_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool {
+ if !emitter.whitespace {
+ if !put(emitter, ' ') {
+ return false
+ }
+ }
+
+ spaces := false
+ breaks := false
+ for i := 0; i < len(value); {
+ if is_space(value, i) {
+ if allow_breaks && !spaces && emitter.column > emitter.best_width && !is_space(value, i+1) {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ i += width(value[i])
+ } else {
+ if !write(emitter, value, &i) {
+ return false
+ }
+ }
+ spaces = true
+ } else if is_break(value, i) {
+ if !breaks && value[i] == '\n' {
+ if !put_break(emitter) {
+ return false
+ }
+ }
+ if !write_break(emitter, value, &i) {
+ return false
+ }
+ emitter.indention = true
+ breaks = true
+ } else {
+ if breaks {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if !write(emitter, value, &i) {
+ return false
+ }
+ emitter.indention = false
+ spaces = false
+ breaks = false
+ }
+ }
+
+ emitter.whitespace = false
+ emitter.indention = false
+ if emitter.root_context {
+ emitter.open_ended = true
+ }
+
+ return true
+}
+
+func yaml_emitter_write_single_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool {
+
+ if !yaml_emitter_write_indicator(emitter, []byte{'\''}, true, false, false) {
+ return false
+ }
+
+ spaces := false
+ breaks := false
+ for i := 0; i < len(value); {
+ if is_space(value, i) {
+ if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 && !is_space(value, i+1) {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ i += width(value[i])
+ } else {
+ if !write(emitter, value, &i) {
+ return false
+ }
+ }
+ spaces = true
+ } else if is_break(value, i) {
+ if !breaks && value[i] == '\n' {
+ if !put_break(emitter) {
+ return false
+ }
+ }
+ if !write_break(emitter, value, &i) {
+ return false
+ }
+ emitter.indention = true
+ breaks = true
+ } else {
+ if breaks {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if value[i] == '\'' {
+ if !put(emitter, '\'') {
+ return false
+ }
+ }
+ if !write(emitter, value, &i) {
+ return false
+ }
+ emitter.indention = false
+ spaces = false
+ breaks = false
+ }
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{'\''}, false, false, false) {
+ return false
+ }
+ emitter.whitespace = false
+ emitter.indention = false
+ return true
+}
+
+func yaml_emitter_write_double_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool {
+ spaces := false
+ if !yaml_emitter_write_indicator(emitter, []byte{'"'}, true, false, false) {
+ return false
+ }
+
+ for i := 0; i < len(value); {
+ if !is_printable(value, i) || (!emitter.unicode && !is_ascii(value, i)) ||
+ is_bom(value, i) || is_break(value, i) ||
+ value[i] == '"' || value[i] == '\\' {
+
+ octet := value[i]
+
+ var w int
+ var v rune
+ switch {
+ case octet&0x80 == 0x00:
+ w, v = 1, rune(octet&0x7F)
+ case octet&0xE0 == 0xC0:
+ w, v = 2, rune(octet&0x1F)
+ case octet&0xF0 == 0xE0:
+ w, v = 3, rune(octet&0x0F)
+ case octet&0xF8 == 0xF0:
+ w, v = 4, rune(octet&0x07)
+ }
+ for k := 1; k < w; k++ {
+ octet = value[i+k]
+ v = (v << 6) + (rune(octet) & 0x3F)
+ }
+ i += w
+
+ if !put(emitter, '\\') {
+ return false
+ }
+
+ var ok bool
+ switch v {
+ case 0x00:
+ ok = put(emitter, '0')
+ case 0x07:
+ ok = put(emitter, 'a')
+ case 0x08:
+ ok = put(emitter, 'b')
+ case 0x09:
+ ok = put(emitter, 't')
+ case 0x0A:
+ ok = put(emitter, 'n')
+ case 0x0b:
+ ok = put(emitter, 'v')
+ case 0x0c:
+ ok = put(emitter, 'f')
+ case 0x0d:
+ ok = put(emitter, 'r')
+ case 0x1b:
+ ok = put(emitter, 'e')
+ case 0x22:
+ ok = put(emitter, '"')
+ case 0x5c:
+ ok = put(emitter, '\\')
+ case 0x85:
+ ok = put(emitter, 'N')
+ case 0xA0:
+ ok = put(emitter, '_')
+ case 0x2028:
+ ok = put(emitter, 'L')
+ case 0x2029:
+ ok = put(emitter, 'P')
+ default:
+ if v <= 0xFF {
+ ok = put(emitter, 'x')
+ w = 2
+ } else if v <= 0xFFFF {
+ ok = put(emitter, 'u')
+ w = 4
+ } else {
+ ok = put(emitter, 'U')
+ w = 8
+ }
+ for k := (w - 1) * 4; ok && k >= 0; k -= 4 {
+ digit := byte((v >> uint(k)) & 0x0F)
+ if digit < 10 {
+ ok = put(emitter, digit+'0')
+ } else {
+ ok = put(emitter, digit+'A'-10)
+ }
+ }
+ }
+ if !ok {
+ return false
+ }
+ spaces = false
+ } else if is_space(value, i) {
+ if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if is_space(value, i+1) {
+ if !put(emitter, '\\') {
+ return false
+ }
+ }
+ i += width(value[i])
+ } else if !write(emitter, value, &i) {
+ return false
+ }
+ spaces = true
+ } else {
+ if !write(emitter, value, &i) {
+ return false
+ }
+ spaces = false
+ }
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{'"'}, false, false, false) {
+ return false
+ }
+ emitter.whitespace = false
+ emitter.indention = false
+ return true
+}
+
+func yaml_emitter_write_block_scalar_hints(emitter *yaml_emitter_t, value []byte) bool {
+ if is_space(value, 0) || is_break(value, 0) {
+ indent_hint := []byte{'0' + byte(emitter.best_indent)}
+ if !yaml_emitter_write_indicator(emitter, indent_hint, false, false, false) {
+ return false
+ }
+ }
+
+ emitter.open_ended = false
+
+ var chomp_hint [1]byte
+ if len(value) == 0 {
+ chomp_hint[0] = '-'
+ } else {
+ i := len(value) - 1
+ for value[i]&0xC0 == 0x80 {
+ i--
+ }
+ if !is_break(value, i) {
+ chomp_hint[0] = '-'
+ } else if i == 0 {
+ chomp_hint[0] = '+'
+ emitter.open_ended = true
+ } else {
+ i--
+ for value[i]&0xC0 == 0x80 {
+ i--
+ }
+ if is_break(value, i) {
+ chomp_hint[0] = '+'
+ emitter.open_ended = true
+ }
+ }
+ }
+ if chomp_hint[0] != 0 {
+ if !yaml_emitter_write_indicator(emitter, chomp_hint[:], false, false, false) {
+ return false
+ }
+ }
+ return true
+}
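+
+// For illustration (assumed hints produced by the logic above, with the
+// default indent of 2):
+//
+//	"text"     -> "|-"  (no trailing break: strip hint)
+//	"text\n"   -> "|"   (single trailing break: no chomp hint)
+//	"text\n\n" -> "|+"  (multiple trailing breaks: keep hint)
+//	"  text\n" -> "|2"  (leading space: explicit indent hint)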
+
+func yaml_emitter_write_literal_scalar(emitter *yaml_emitter_t, value []byte) bool {
+ if !yaml_emitter_write_indicator(emitter, []byte{'|'}, true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_block_scalar_hints(emitter, value) {
+ return false
+ }
+ if !put_break(emitter) {
+ return false
+ }
+ emitter.indention = true
+ emitter.whitespace = true
+ breaks := true
+ for i := 0; i < len(value); {
+ if is_break(value, i) {
+ if !write_break(emitter, value, &i) {
+ return false
+ }
+ emitter.indention = true
+ breaks = true
+ } else {
+ if breaks {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if !write(emitter, value, &i) {
+ return false
+ }
+ emitter.indention = false
+ breaks = false
+ }
+ }
+
+ return true
+}
+
+func yaml_emitter_write_folded_scalar(emitter *yaml_emitter_t, value []byte) bool {
+ if !yaml_emitter_write_indicator(emitter, []byte{'>'}, true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_block_scalar_hints(emitter, value) {
+ return false
+ }
+
+ if !put_break(emitter) {
+ return false
+ }
+ emitter.indention = true
+ emitter.whitespace = true
+
+ breaks := true
+ leading_spaces := true
+ for i := 0; i < len(value); {
+ if is_break(value, i) {
+ if !breaks && !leading_spaces && value[i] == '\n' {
+ k := 0
+ for is_break(value, k) {
+ k += width(value[k])
+ }
+ if !is_blankz(value, k) {
+ if !put_break(emitter) {
+ return false
+ }
+ }
+ }
+ if !write_break(emitter, value, &i) {
+ return false
+ }
+ emitter.indention = true
+ breaks = true
+ } else {
+ if breaks {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ leading_spaces = is_blank(value, i)
+ }
+ if !breaks && is_space(value, i) && !is_space(value, i+1) && emitter.column > emitter.best_width {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ i += width(value[i])
+ } else {
+ if !write(emitter, value, &i) {
+ return false
+ }
+ }
+ emitter.indention = false
+ breaks = false
+ }
+ }
+ return true
+}
diff --git a/vendor/gopkg.in/yaml.v2/encode.go b/vendor/gopkg.in/yaml.v2/encode.go
new file mode 100644
index 0000000000..0ee738e11b
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/encode.go
@@ -0,0 +1,390 @@
+package yaml
+
+import (
+ "encoding"
+ "fmt"
+ "io"
+ "reflect"
+ "regexp"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+ "unicode/utf8"
+)
+
+// jsonNumber is the interface of the encoding/json.Number datatype.
+// Repeating the interface here avoids a dependency on encoding/json, and also
+// supports other libraries like jsoniter, which use a similar datatype with
+// the same interface. Detecting this interface is useful when dealing with
+// structures containing json.Number, which is a string under the hood. The
+// encoder should prefer the use of Int64(), Float64() and string(), in that
+// order, when encoding this type.
+type jsonNumber interface {
+ Float64() (float64, error)
+ Int64() (int64, error)
+ String() string
+}
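+
+// For illustration (json.Number from encoding/json satisfies this interface;
+// behavior per the marshal switch below):
+//
+//	yaml.Marshal(map[string]json.Number{"n": json.Number("3")})   // n: 3
+//	yaml.Marshal(map[string]json.Number{"n": json.Number("3.5")}) // n: 3.5
+//	// a value that parses as neither int64 nor float64 falls back
+//	// to its String() form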
+
+type encoder struct {
+ emitter yaml_emitter_t
+ event yaml_event_t
+ out []byte
+ flow bool
+ // doneInit holds whether the initial stream_start_event has been
+ // emitted.
+ doneInit bool
+}
+
+func newEncoder() *encoder {
+ e := &encoder{}
+ yaml_emitter_initialize(&e.emitter)
+ yaml_emitter_set_output_string(&e.emitter, &e.out)
+ yaml_emitter_set_unicode(&e.emitter, true)
+ return e
+}
+
+func newEncoderWithWriter(w io.Writer) *encoder {
+ e := &encoder{}
+ yaml_emitter_initialize(&e.emitter)
+ yaml_emitter_set_output_writer(&e.emitter, w)
+ yaml_emitter_set_unicode(&e.emitter, true)
+ return e
+}
+
+func (e *encoder) init() {
+ if e.doneInit {
+ return
+ }
+ yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING)
+ e.emit()
+ e.doneInit = true
+}
+
+func (e *encoder) finish() {
+ e.emitter.open_ended = false
+ yaml_stream_end_event_initialize(&e.event)
+ e.emit()
+}
+
+func (e *encoder) destroy() {
+ yaml_emitter_delete(&e.emitter)
+}
+
+func (e *encoder) emit() {
+ // This will internally delete the e.event value.
+ e.must(yaml_emitter_emit(&e.emitter, &e.event))
+}
+
+func (e *encoder) must(ok bool) {
+ if !ok {
+ msg := e.emitter.problem
+ if msg == "" {
+ msg = "unknown problem generating YAML content"
+ }
+ failf("%s", msg)
+ }
+}
+
+func (e *encoder) marshalDoc(tag string, in reflect.Value) {
+ e.init()
+ yaml_document_start_event_initialize(&e.event, nil, nil, true)
+ e.emit()
+ e.marshal(tag, in)
+ yaml_document_end_event_initialize(&e.event, true)
+ e.emit()
+}
+
+func (e *encoder) marshal(tag string, in reflect.Value) {
+ if !in.IsValid() || in.Kind() == reflect.Ptr && in.IsNil() {
+ e.nilv()
+ return
+ }
+ iface := in.Interface()
+ switch m := iface.(type) {
+ case jsonNumber:
+ integer, err := m.Int64()
+ if err == nil {
+ // In this case the json.Number is a valid int64
+ in = reflect.ValueOf(integer)
+ break
+ }
+ float, err := m.Float64()
+ if err == nil {
+ // In this case the json.Number is a valid float64
+ in = reflect.ValueOf(float)
+ break
+ }
+ // fallback case - no number could be obtained
+ in = reflect.ValueOf(m.String())
+ case time.Time, *time.Time:
+ // Although time.Time implements TextMarshaler,
+ // we don't want to treat it as a string for YAML
+ // purposes because YAML has special support for
+ // timestamps.
+ case Marshaler:
+ v, err := m.MarshalYAML()
+ if err != nil {
+ fail(err)
+ }
+ if v == nil {
+ e.nilv()
+ return
+ }
+ in = reflect.ValueOf(v)
+ case encoding.TextMarshaler:
+ text, err := m.MarshalText()
+ if err != nil {
+ fail(err)
+ }
+ in = reflect.ValueOf(string(text))
+ case nil:
+ e.nilv()
+ return
+ }
+ switch in.Kind() {
+ case reflect.Interface:
+ e.marshal(tag, in.Elem())
+ case reflect.Map:
+ e.mapv(tag, in)
+ case reflect.Ptr:
+ if in.Type() == ptrTimeType {
+ e.timev(tag, in.Elem())
+ } else {
+ e.marshal(tag, in.Elem())
+ }
+ case reflect.Struct:
+ if in.Type() == timeType {
+ e.timev(tag, in)
+ } else {
+ e.structv(tag, in)
+ }
+ case reflect.Slice, reflect.Array:
+ if in.Type().Elem() == mapItemType {
+ e.itemsv(tag, in)
+ } else {
+ e.slicev(tag, in)
+ }
+ case reflect.String:
+ e.stringv(tag, in)
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ if in.Type() == durationType {
+ e.stringv(tag, reflect.ValueOf(iface.(time.Duration).String()))
+ } else {
+ e.intv(tag, in)
+ }
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ e.uintv(tag, in)
+ case reflect.Float32, reflect.Float64:
+ e.floatv(tag, in)
+ case reflect.Bool:
+ e.boolv(tag, in)
+ default:
+ panic("cannot marshal type: " + in.Type().String())
+ }
+}
+
+func (e *encoder) mapv(tag string, in reflect.Value) {
+ e.mappingv(tag, func() {
+ keys := keyList(in.MapKeys())
+ sort.Sort(keys)
+ for _, k := range keys {
+ e.marshal("", k)
+ e.marshal("", in.MapIndex(k))
+ }
+ })
+}
+
+func (e *encoder) itemsv(tag string, in reflect.Value) {
+ e.mappingv(tag, func() {
+ slice := in.Convert(reflect.TypeOf([]MapItem{})).Interface().([]MapItem)
+ for _, item := range slice {
+ e.marshal("", reflect.ValueOf(item.Key))
+ e.marshal("", reflect.ValueOf(item.Value))
+ }
+ })
+}
+
+func (e *encoder) structv(tag string, in reflect.Value) {
+ sinfo, err := getStructInfo(in.Type())
+ if err != nil {
+ panic(err)
+ }
+ e.mappingv(tag, func() {
+ for _, info := range sinfo.FieldsList {
+ var value reflect.Value
+ if info.Inline == nil {
+ value = in.Field(info.Num)
+ } else {
+ value = in.FieldByIndex(info.Inline)
+ }
+ if info.OmitEmpty && isZero(value) {
+ continue
+ }
+ e.marshal("", reflect.ValueOf(info.Key))
+ e.flow = info.Flow
+ e.marshal("", value)
+ }
+ if sinfo.InlineMap >= 0 {
+ m := in.Field(sinfo.InlineMap)
+ if m.Len() > 0 {
+ e.flow = false
+ keys := keyList(m.MapKeys())
+ sort.Sort(keys)
+ for _, k := range keys {
+ if _, found := sinfo.FieldsMap[k.String()]; found {
+ panic(fmt.Sprintf("Can't have key %q in inlined map; conflicts with struct field", k.String()))
+ }
+ e.marshal("", k)
+ e.flow = false
+ e.marshal("", m.MapIndex(k))
+ }
+ }
+ }
+ })
+}
+
+func (e *encoder) mappingv(tag string, f func()) {
+ implicit := tag == ""
+ style := yaml_BLOCK_MAPPING_STYLE
+ if e.flow {
+ e.flow = false
+ style = yaml_FLOW_MAPPING_STYLE
+ }
+ yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style)
+ e.emit()
+ f()
+ yaml_mapping_end_event_initialize(&e.event)
+ e.emit()
+}
+
+func (e *encoder) slicev(tag string, in reflect.Value) {
+ implicit := tag == ""
+ style := yaml_BLOCK_SEQUENCE_STYLE
+ if e.flow {
+ e.flow = false
+ style = yaml_FLOW_SEQUENCE_STYLE
+ }
+ e.must(yaml_sequence_start_event_initialize(&e.event, nil, []byte(tag), implicit, style))
+ e.emit()
+ n := in.Len()
+ for i := 0; i < n; i++ {
+ e.marshal("", in.Index(i))
+ }
+ e.must(yaml_sequence_end_event_initialize(&e.event))
+ e.emit()
+}
+
+// isBase60 returns whether s is in base 60 notation as defined in YAML 1.1.
+//
+// The base 60 float notation in YAML 1.1 is a terrible idea and is unsupported
+// in YAML 1.2 and by this package, but these should be marshalled quoted for
+// the time being for compatibility with other parsers.
+func isBase60Float(s string) (result bool) {
+ // Fast path.
+ if s == "" {
+ return false
+ }
+ c := s[0]
+ if !(c == '+' || c == '-' || c >= '0' && c <= '9') || strings.IndexByte(s, ':') < 0 {
+ return false
+ }
+ // Do the full match.
+ return base60float.MatchString(s)
+}
+
+// From http://yaml.org/type/float.html, except the regular expression there
+// is bogus. In practice parsers do not enforce the "\.[0-9_]*" suffix.
+var base60float = regexp.MustCompile(`^[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+(?:\.[0-9_]*)?$`)
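+
+// For illustration (assumed matches for the expression above): "1:20",
+// "-1:00:00.5" and "190:20:30" are treated as base 60 and therefore quoted
+// when marshalled, while "1.5" (no sexagesimal group) and "a:b" (fails the
+// fast path) are not.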
+
+func (e *encoder) stringv(tag string, in reflect.Value) {
+ var style yaml_scalar_style_t
+ s := in.String()
+ canUsePlain := true
+ switch {
+ case !utf8.ValidString(s):
+ if tag == yaml_BINARY_TAG {
+ failf("explicitly tagged !!binary data must be base64-encoded")
+ }
+ if tag != "" {
+ failf("cannot marshal invalid UTF-8 data as %s", shortTag(tag))
+ }
+ // It can't be encoded directly as YAML so use a binary tag
+ // and encode it as base64.
+ tag = yaml_BINARY_TAG
+ s = encodeBase64(s)
+ case tag == "":
+ // Check to see if it would resolve to a specific
+ // tag when encoded unquoted. If it doesn't,
+ // there's no need to quote it.
+ rtag, _ := resolve("", s)
+ canUsePlain = rtag == yaml_STR_TAG && !isBase60Float(s)
+ }
+ // Note: it's possible for user code to emit invalid YAML
+ // if they explicitly specify a tag and a string containing
+ // text that's incompatible with that tag.
+ switch {
+ case strings.Contains(s, "\n"):
+ style = yaml_LITERAL_SCALAR_STYLE
+ case canUsePlain:
+ style = yaml_PLAIN_SCALAR_STYLE
+ default:
+ style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ }
+ e.emitScalar(s, "", tag, style)
+}
+
+func (e *encoder) boolv(tag string, in reflect.Value) {
+ var s string
+ if in.Bool() {
+ s = "true"
+ } else {
+ s = "false"
+ }
+ e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
+}
+
+func (e *encoder) intv(tag string, in reflect.Value) {
+ s := strconv.FormatInt(in.Int(), 10)
+ e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
+}
+
+func (e *encoder) uintv(tag string, in reflect.Value) {
+ s := strconv.FormatUint(in.Uint(), 10)
+ e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
+}
+
+func (e *encoder) timev(tag string, in reflect.Value) {
+ t := in.Interface().(time.Time)
+ s := t.Format(time.RFC3339Nano)
+ e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
+}
+
+func (e *encoder) floatv(tag string, in reflect.Value) {
+ // Issue #352: When formatting, use the precision of the underlying value
+ precision := 64
+ if in.Kind() == reflect.Float32 {
+ precision = 32
+ }
+
+ s := strconv.FormatFloat(in.Float(), 'g', -1, precision)
+ switch s {
+ case "+Inf":
+ s = ".inf"
+ case "-Inf":
+ s = "-.inf"
+ case "NaN":
+ s = ".nan"
+ }
+ e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
+}
+
+func (e *encoder) nilv() {
+ e.emitScalar("null", "", "", yaml_PLAIN_SCALAR_STYLE)
+}
+
+func (e *encoder) emitScalar(value, anchor, tag string, style yaml_scalar_style_t) {
+ implicit := tag == ""
+ e.must(yaml_scalar_event_initialize(&e.event, []byte(anchor), []byte(tag), []byte(value), implicit, implicit, style))
+ e.emit()
+}
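+
+// For illustration (a sketch of how this file's pieces compose; the exported
+// Marshal wrapper in yaml.go drives them in essentially this order):
+//
+//	e := newEncoder()
+//	defer e.destroy()
+//	e.marshalDoc("", reflect.ValueOf(map[string]int{"a": 1}))
+//	e.finish()
+//	// e.out now holds the bytes "a: 1\n"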
diff --git a/vendor/gopkg.in/yaml.v2/parserc.go b/vendor/gopkg.in/yaml.v2/parserc.go
new file mode 100644
index 0000000000..81d05dfe57
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/parserc.go
@@ -0,0 +1,1095 @@
+package yaml
+
+import (
+ "bytes"
+)
+
+// The parser implements the following grammar:
+//
+// stream ::= STREAM-START implicit_document? explicit_document* STREAM-END
+// implicit_document ::= block_node DOCUMENT-END*
+// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+// block_node_or_indentless_sequence ::=
+// ALIAS
+// | properties (block_content | indentless_block_sequence)?
+// | block_content
+// | indentless_block_sequence
+// block_node ::= ALIAS
+// | properties block_content?
+// | block_content
+// flow_node ::= ALIAS
+// | properties flow_content?
+// | flow_content
+// properties ::= TAG ANCHOR? | ANCHOR TAG?
+// block_content ::= block_collection | flow_collection | SCALAR
+// flow_content ::= flow_collection | SCALAR
+// block_collection ::= block_sequence | block_mapping
+// flow_collection ::= flow_sequence | flow_mapping
+// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
+// indentless_sequence ::= (BLOCK-ENTRY block_node?)+
+// block_mapping ::= BLOCK-MAPPING_START
+// ((KEY block_node_or_indentless_sequence?)?
+// (VALUE block_node_or_indentless_sequence?)?)*
+// BLOCK-END
+// flow_sequence ::= FLOW-SEQUENCE-START
+// (flow_sequence_entry FLOW-ENTRY)*
+// flow_sequence_entry?
+// FLOW-SEQUENCE-END
+// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+// flow_mapping ::= FLOW-MAPPING-START
+// (flow_mapping_entry FLOW-ENTRY)*
+// flow_mapping_entry?
+// FLOW-MAPPING-END
+// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
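+//
+// For illustration (assumed event stream for a small input): parsing the
+// document "a: [1, 2]" produces STREAM-START, DOCUMENT-START, MAPPING-START,
+// SCALAR("a"), SEQUENCE-START, SCALAR("1"), SCALAR("2"), SEQUENCE-END,
+// MAPPING-END, DOCUMENT-END, STREAM-END.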
+
+// Peek the next token in the token queue.
+func peek_token(parser *yaml_parser_t) *yaml_token_t {
+ if parser.token_available || yaml_parser_fetch_more_tokens(parser) {
+ return &parser.tokens[parser.tokens_head]
+ }
+ return nil
+}
+
+// Remove the next token from the queue (must be called after peek_token).
+func skip_token(parser *yaml_parser_t) {
+ parser.token_available = false
+ parser.tokens_parsed++
+ parser.stream_end_produced = parser.tokens[parser.tokens_head].typ == yaml_STREAM_END_TOKEN
+ parser.tokens_head++
+}
+
+// Get the next event.
+func yaml_parser_parse(parser *yaml_parser_t, event *yaml_event_t) bool {
+ // Erase the event object.
+ *event = yaml_event_t{}
+
+ // No events after the end of the stream or error.
+ if parser.stream_end_produced || parser.error != yaml_NO_ERROR || parser.state == yaml_PARSE_END_STATE {
+ return true
+ }
+
+ // Generate the next event.
+ return yaml_parser_state_machine(parser, event)
+}
+
+// Set parser error.
+func yaml_parser_set_parser_error(parser *yaml_parser_t, problem string, problem_mark yaml_mark_t) bool {
+ parser.error = yaml_PARSER_ERROR
+ parser.problem = problem
+ parser.problem_mark = problem_mark
+ return false
+}
+
+func yaml_parser_set_parser_error_context(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string, problem_mark yaml_mark_t) bool {
+ parser.error = yaml_PARSER_ERROR
+ parser.context = context
+ parser.context_mark = context_mark
+ parser.problem = problem
+ parser.problem_mark = problem_mark
+ return false
+}
+
+// State dispatcher.
+func yaml_parser_state_machine(parser *yaml_parser_t, event *yaml_event_t) bool {
+ //trace("yaml_parser_state_machine", "state:", parser.state.String())
+
+ switch parser.state {
+ case yaml_PARSE_STREAM_START_STATE:
+ return yaml_parser_parse_stream_start(parser, event)
+
+ case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE:
+ return yaml_parser_parse_document_start(parser, event, true)
+
+ case yaml_PARSE_DOCUMENT_START_STATE:
+ return yaml_parser_parse_document_start(parser, event, false)
+
+ case yaml_PARSE_DOCUMENT_CONTENT_STATE:
+ return yaml_parser_parse_document_content(parser, event)
+
+ case yaml_PARSE_DOCUMENT_END_STATE:
+ return yaml_parser_parse_document_end(parser, event)
+
+ case yaml_PARSE_BLOCK_NODE_STATE:
+ return yaml_parser_parse_node(parser, event, true, false)
+
+ case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE:
+ return yaml_parser_parse_node(parser, event, true, true)
+
+ case yaml_PARSE_FLOW_NODE_STATE:
+ return yaml_parser_parse_node(parser, event, false, false)
+
+ case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE:
+ return yaml_parser_parse_block_sequence_entry(parser, event, true)
+
+ case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE:
+ return yaml_parser_parse_block_sequence_entry(parser, event, false)
+
+ case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE:
+ return yaml_parser_parse_indentless_sequence_entry(parser, event)
+
+ case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE:
+ return yaml_parser_parse_block_mapping_key(parser, event, true)
+
+ case yaml_PARSE_BLOCK_MAPPING_KEY_STATE:
+ return yaml_parser_parse_block_mapping_key(parser, event, false)
+
+ case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE:
+ return yaml_parser_parse_block_mapping_value(parser, event)
+
+ case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE:
+ return yaml_parser_parse_flow_sequence_entry(parser, event, true)
+
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE:
+ return yaml_parser_parse_flow_sequence_entry(parser, event, false)
+
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE:
+ return yaml_parser_parse_flow_sequence_entry_mapping_key(parser, event)
+
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE:
+ return yaml_parser_parse_flow_sequence_entry_mapping_value(parser, event)
+
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE:
+ return yaml_parser_parse_flow_sequence_entry_mapping_end(parser, event)
+
+ case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE:
+ return yaml_parser_parse_flow_mapping_key(parser, event, true)
+
+ case yaml_PARSE_FLOW_MAPPING_KEY_STATE:
+ return yaml_parser_parse_flow_mapping_key(parser, event, false)
+
+ case yaml_PARSE_FLOW_MAPPING_VALUE_STATE:
+ return yaml_parser_parse_flow_mapping_value(parser, event, false)
+
+ case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE:
+ return yaml_parser_parse_flow_mapping_value(parser, event, true)
+
+ default:
+ panic("invalid parser state")
+ }
+}
+
+// Parse the production:
+// stream ::= STREAM-START implicit_document? explicit_document* STREAM-END
+// ************
+func yaml_parser_parse_stream_start(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_STREAM_START_TOKEN {
+ return yaml_parser_set_parser_error(parser, "did not find expected <stream-start>", token.start_mark)
+ }
+ parser.state = yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE
+ *event = yaml_event_t{
+ typ: yaml_STREAM_START_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ encoding: token.encoding,
+ }
+ skip_token(parser)
+ return true
+}
+
+// Parse the productions:
+// implicit_document ::= block_node DOCUMENT-END*
+// *
+// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+// *************************
+func yaml_parser_parse_document_start(parser *yaml_parser_t, event *yaml_event_t, implicit bool) bool {
+
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ // Parse extra document end indicators.
+ if !implicit {
+ for token.typ == yaml_DOCUMENT_END_TOKEN {
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ }
+ }
+
+ if implicit && token.typ != yaml_VERSION_DIRECTIVE_TOKEN &&
+ token.typ != yaml_TAG_DIRECTIVE_TOKEN &&
+ token.typ != yaml_DOCUMENT_START_TOKEN &&
+ token.typ != yaml_STREAM_END_TOKEN {
+ // Parse an implicit document.
+ if !yaml_parser_process_directives(parser, nil, nil) {
+ return false
+ }
+ parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE)
+ parser.state = yaml_PARSE_BLOCK_NODE_STATE
+
+ *event = yaml_event_t{
+ typ: yaml_DOCUMENT_START_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ }
+
+ } else if token.typ != yaml_STREAM_END_TOKEN {
+ // Parse an explicit document.
+ var version_directive *yaml_version_directive_t
+ var tag_directives []yaml_tag_directive_t
+ start_mark := token.start_mark
+ if !yaml_parser_process_directives(parser, &version_directive, &tag_directives) {
+ return false
+ }
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_DOCUMENT_START_TOKEN {
+ yaml_parser_set_parser_error(parser,
+ "did not find expected <document start>", token.start_mark)
+ return false
+ }
+ parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE)
+ parser.state = yaml_PARSE_DOCUMENT_CONTENT_STATE
+ end_mark := token.end_mark
+
+ *event = yaml_event_t{
+ typ: yaml_DOCUMENT_START_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ version_directive: version_directive,
+ tag_directives: tag_directives,
+ implicit: false,
+ }
+ skip_token(parser)
+
+ } else {
+ // Parse the stream end.
+ parser.state = yaml_PARSE_END_STATE
+ *event = yaml_event_t{
+ typ: yaml_STREAM_END_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ }
+ skip_token(parser)
+ }
+
+ return true
+}
+
+// Parse the productions:
+// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+// ***********
+//
+func yaml_parser_parse_document_content(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ == yaml_VERSION_DIRECTIVE_TOKEN ||
+ token.typ == yaml_TAG_DIRECTIVE_TOKEN ||
+ token.typ == yaml_DOCUMENT_START_TOKEN ||
+ token.typ == yaml_DOCUMENT_END_TOKEN ||
+ token.typ == yaml_STREAM_END_TOKEN {
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+ return yaml_parser_process_empty_scalar(parser, event,
+ token.start_mark)
+ }
+ return yaml_parser_parse_node(parser, event, true, false)
+}
+
+// Parse the productions:
+// implicit_document ::= block_node DOCUMENT-END*
+// *************
+// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+//
+func yaml_parser_parse_document_end(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ start_mark := token.start_mark
+ end_mark := token.start_mark
+
+ implicit := true
+ if token.typ == yaml_DOCUMENT_END_TOKEN {
+ end_mark = token.end_mark
+ skip_token(parser)
+ implicit = false
+ }
+
+ parser.tag_directives = parser.tag_directives[:0]
+
+ parser.state = yaml_PARSE_DOCUMENT_START_STATE
+ *event = yaml_event_t{
+ typ: yaml_DOCUMENT_END_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ implicit: implicit,
+ }
+ return true
+}
+
+// Parse the productions:
+// block_node_or_indentless_sequence ::=
+// ALIAS
+// *****
+// | properties (block_content | indentless_block_sequence)?
+// ********** *
+// | block_content | indentless_block_sequence
+// *
+// block_node ::= ALIAS
+// *****
+// | properties block_content?
+// ********** *
+// | block_content
+// *
+// flow_node ::= ALIAS
+// *****
+// | properties flow_content?
+// ********** *
+// | flow_content
+// *
+// properties ::= TAG ANCHOR? | ANCHOR TAG?
+// *************************
+// block_content ::= block_collection | flow_collection | SCALAR
+// ******
+// flow_content ::= flow_collection | SCALAR
+// ******
+func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, block, indentless_sequence bool) bool {
+ //defer trace("yaml_parser_parse_node", "block:", block, "indentless_sequence:", indentless_sequence)()
+
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ if token.typ == yaml_ALIAS_TOKEN {
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+ *event = yaml_event_t{
+ typ: yaml_ALIAS_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ anchor: token.value,
+ }
+ skip_token(parser)
+ return true
+ }
+
+ start_mark := token.start_mark
+ end_mark := token.start_mark
+
+ var tag_token bool
+ var tag_handle, tag_suffix, anchor []byte
+ var tag_mark yaml_mark_t
+ if token.typ == yaml_ANCHOR_TOKEN {
+ anchor = token.value
+ start_mark = token.start_mark
+ end_mark = token.end_mark
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ == yaml_TAG_TOKEN {
+ tag_token = true
+ tag_handle = token.value
+ tag_suffix = token.suffix
+ tag_mark = token.start_mark
+ end_mark = token.end_mark
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ }
+ } else if token.typ == yaml_TAG_TOKEN {
+ tag_token = true
+ tag_handle = token.value
+ tag_suffix = token.suffix
+ start_mark = token.start_mark
+ tag_mark = token.start_mark
+ end_mark = token.end_mark
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ == yaml_ANCHOR_TOKEN {
+ anchor = token.value
+ end_mark = token.end_mark
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ }
+ }
+
+ var tag []byte
+ if tag_token {
+ if len(tag_handle) == 0 {
+ tag = tag_suffix
+ tag_suffix = nil
+ } else {
+ for i := range parser.tag_directives {
+ if bytes.Equal(parser.tag_directives[i].handle, tag_handle) {
+ tag = append([]byte(nil), parser.tag_directives[i].prefix...)
+ tag = append(tag, tag_suffix...)
+ break
+ }
+ }
+ if len(tag) == 0 {
+ yaml_parser_set_parser_error_context(parser,
+ "while parsing a node", start_mark,
+ "found undefined tag handle", tag_mark)
+ return false
+ }
+ }
+ }
+
+ implicit := len(tag) == 0
+ if indentless_sequence && token.typ == yaml_BLOCK_ENTRY_TOKEN {
+ end_mark = token.end_mark
+ parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_START_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE),
+ }
+ return true
+ }
+ if token.typ == yaml_SCALAR_TOKEN {
+ var plain_implicit, quoted_implicit bool
+ end_mark = token.end_mark
+ if (len(tag) == 0 && token.style == yaml_PLAIN_SCALAR_STYLE) || (len(tag) == 1 && tag[0] == '!') {
+ plain_implicit = true
+ } else if len(tag) == 0 {
+ quoted_implicit = true
+ }
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+
+ *event = yaml_event_t{
+ typ: yaml_SCALAR_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ anchor: anchor,
+ tag: tag,
+ value: token.value,
+ implicit: plain_implicit,
+ quoted_implicit: quoted_implicit,
+ style: yaml_style_t(token.style),
+ }
+ skip_token(parser)
+ return true
+ }
+ if token.typ == yaml_FLOW_SEQUENCE_START_TOKEN {
+ // [Go] Some of the events below can be merged as they differ only on style.
+ end_mark = token.end_mark
+ parser.state = yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_START_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ style: yaml_style_t(yaml_FLOW_SEQUENCE_STYLE),
+ }
+ return true
+ }
+ if token.typ == yaml_FLOW_MAPPING_START_TOKEN {
+ end_mark = token.end_mark
+ parser.state = yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_START_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ style: yaml_style_t(yaml_FLOW_MAPPING_STYLE),
+ }
+ return true
+ }
+ if block && token.typ == yaml_BLOCK_SEQUENCE_START_TOKEN {
+ end_mark = token.end_mark
+ parser.state = yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_START_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE),
+ }
+ return true
+ }
+ if block && token.typ == yaml_BLOCK_MAPPING_START_TOKEN {
+ end_mark = token.end_mark
+ parser.state = yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_START_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ style: yaml_style_t(yaml_BLOCK_MAPPING_STYLE),
+ }
+ return true
+ }
+ if len(anchor) > 0 || len(tag) > 0 {
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+
+ *event = yaml_event_t{
+ typ: yaml_SCALAR_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ quoted_implicit: false,
+ style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE),
+ }
+ return true
+ }
+
+ context := "while parsing a flow node"
+ if block {
+ context = "while parsing a block node"
+ }
+ yaml_parser_set_parser_error_context(parser, context, start_mark,
+ "did not find expected node content", token.start_mark)
+ return false
+}
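+
+// Worked example for the properties handling above (a sketch): the node
+//
+//      &a !!str hello
+//
+// scans as ANCHOR("a") TAG("!!","str") SCALAR("hello",plain), and the three
+// tokens are folded into a single SCALAR event carrying anchor "a" and the
+// resolved tag "tag:yaml.org,2002:str", with both implicit flags false.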
+
+// Parse the productions:
+// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
+// ******************** *********** * *********
+//
+func yaml_parser_parse_block_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
+ if first {
+ token := peek_token(parser)
+ parser.marks = append(parser.marks, token.start_mark)
+ skip_token(parser)
+ }
+
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ if token.typ == yaml_BLOCK_ENTRY_TOKEN {
+ mark := token.end_mark
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_BLOCK_ENTRY_TOKEN && token.typ != yaml_BLOCK_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE)
+ return yaml_parser_parse_node(parser, event, true, false)
+ } else {
+ parser.state = yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE
+ return yaml_parser_process_empty_scalar(parser, event, mark)
+ }
+ }
+ if token.typ == yaml_BLOCK_END_TOKEN {
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_END_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ }
+
+ skip_token(parser)
+ return true
+ }
+
+ context_mark := parser.marks[len(parser.marks)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+ return yaml_parser_set_parser_error_context(parser,
+ "while parsing a block collection", context_mark,
+ "did not find expected '-' indicator", token.start_mark)
+}
+
+// Parse the productions:
+// indentless_sequence ::= (BLOCK-ENTRY block_node?)+
+// *********** *
+func yaml_parser_parse_indentless_sequence_entry(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ if token.typ == yaml_BLOCK_ENTRY_TOKEN {
+ mark := token.end_mark
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_BLOCK_ENTRY_TOKEN &&
+ token.typ != yaml_KEY_TOKEN &&
+ token.typ != yaml_VALUE_TOKEN &&
+ token.typ != yaml_BLOCK_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE)
+ return yaml_parser_parse_node(parser, event, true, false)
+ }
+ parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE
+ return yaml_parser_process_empty_scalar(parser, event, mark)
+ }
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_END_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.start_mark, // [Go] Shouldn't this be token.end_mark?
+ }
+ return true
+}
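+
+// Example of the indentless case handled above (a sketch):
+//
+//      key:
+//      - item 1
+//      - item 2
+//
+// The entries sit at the same column as "key", so no BLOCK-SEQUENCE-START
+// token is emitted, and the SEQUENCE-END event is placed at the start mark
+// of whatever token terminates the sequence.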
+
+// Parse the productions:
+// block_mapping ::= BLOCK-MAPPING_START
+// *******************
+// ((KEY block_node_or_indentless_sequence?)?
+// *** *
+// (VALUE block_node_or_indentless_sequence?)?)*
+//
+// BLOCK-END
+// *********
+//
+func yaml_parser_parse_block_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
+ if first {
+ token := peek_token(parser)
+ parser.marks = append(parser.marks, token.start_mark)
+ skip_token(parser)
+ }
+
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ if token.typ == yaml_KEY_TOKEN {
+ mark := token.end_mark
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_KEY_TOKEN &&
+ token.typ != yaml_VALUE_TOKEN &&
+ token.typ != yaml_BLOCK_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_VALUE_STATE)
+ return yaml_parser_parse_node(parser, event, true, true)
+ } else {
+ parser.state = yaml_PARSE_BLOCK_MAPPING_VALUE_STATE
+ return yaml_parser_process_empty_scalar(parser, event, mark)
+ }
+ } else if token.typ == yaml_BLOCK_END_TOKEN {
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_END_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ }
+ skip_token(parser)
+ return true
+ }
+
+ context_mark := parser.marks[len(parser.marks)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+ return yaml_parser_set_parser_error_context(parser,
+ "while parsing a block mapping", context_mark,
+ "did not find expected key", token.start_mark)
+}
+
+// Parse the productions:
+// block_mapping ::= BLOCK-MAPPING_START
+//
+// ((KEY block_node_or_indentless_sequence?)?
+//
+// (VALUE block_node_or_indentless_sequence?)?)*
+// ***** *
+// BLOCK-END
+//
+//
+func yaml_parser_parse_block_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ == yaml_VALUE_TOKEN {
+ mark := token.end_mark
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_KEY_TOKEN &&
+ token.typ != yaml_VALUE_TOKEN &&
+ token.typ != yaml_BLOCK_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_KEY_STATE)
+ return yaml_parser_parse_node(parser, event, true, true)
+ }
+ parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE
+ return yaml_parser_process_empty_scalar(parser, event, mark)
+ }
+ parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE
+ return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
+}
+
+// Parse the productions:
+// flow_sequence ::= FLOW-SEQUENCE-START
+// *******************
+// (flow_sequence_entry FLOW-ENTRY)*
+// * **********
+// flow_sequence_entry?
+// *
+// FLOW-SEQUENCE-END
+// *****************
+// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+// *
+//
+func yaml_parser_parse_flow_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
+ if first {
+ token := peek_token(parser)
+ parser.marks = append(parser.marks, token.start_mark)
+ skip_token(parser)
+ }
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
+ if !first {
+ if token.typ == yaml_FLOW_ENTRY_TOKEN {
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ } else {
+ context_mark := parser.marks[len(parser.marks)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+ return yaml_parser_set_parser_error_context(parser,
+ "while parsing a flow sequence", context_mark,
+ "did not find expected ',' or ']'", token.start_mark)
+ }
+ }
+
+ if token.typ == yaml_KEY_TOKEN {
+ parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_START_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ implicit: true,
+ style: yaml_style_t(yaml_FLOW_MAPPING_STYLE),
+ }
+ skip_token(parser)
+ return true
+ } else if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE)
+ return yaml_parser_parse_node(parser, event, false, false)
+ }
+ }
+
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_END_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ }
+
+ skip_token(parser)
+ return true
+}
+
+//
+// Parse the productions:
+// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+// *** *
+//
+func yaml_parser_parse_flow_sequence_entry_mapping_key(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_VALUE_TOKEN &&
+ token.typ != yaml_FLOW_ENTRY_TOKEN &&
+ token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE)
+ return yaml_parser_parse_node(parser, event, false, false)
+ }
+ mark := token.end_mark
+ skip_token(parser)
+ parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE
+ return yaml_parser_process_empty_scalar(parser, event, mark)
+}
+
+// Parse the productions:
+// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+// ***** *
+//
+func yaml_parser_parse_flow_sequence_entry_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ == yaml_VALUE_TOKEN {
+ skip_token(parser)
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE)
+ return yaml_parser_parse_node(parser, event, false, false)
+ }
+ }
+ parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE
+ return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
+}
+
+// Parse the productions:
+// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+// *
+//
+func yaml_parser_parse_flow_sequence_entry_mapping_end(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_END_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.start_mark, // [Go] Shouldn't this be end_mark?
+ }
+ return true
+}
+
+// Parse the productions:
+// flow_mapping ::= FLOW-MAPPING-START
+// ******************
+// (flow_mapping_entry FLOW-ENTRY)*
+// * **********
+// flow_mapping_entry?
+// ******************
+// FLOW-MAPPING-END
+// ****************
+// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+// * *** *
+//
+func yaml_parser_parse_flow_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
+ if first {
+ token := peek_token(parser)
+ parser.marks = append(parser.marks, token.start_mark)
+ skip_token(parser)
+ }
+
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ if token.typ != yaml_FLOW_MAPPING_END_TOKEN {
+ if !first {
+ if token.typ == yaml_FLOW_ENTRY_TOKEN {
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ } else {
+ context_mark := parser.marks[len(parser.marks)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+ return yaml_parser_set_parser_error_context(parser,
+ "while parsing a flow mapping", context_mark,
+ "did not find expected ',' or '}'", token.start_mark)
+ }
+ }
+
+ if token.typ == yaml_KEY_TOKEN {
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_VALUE_TOKEN &&
+ token.typ != yaml_FLOW_ENTRY_TOKEN &&
+ token.typ != yaml_FLOW_MAPPING_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_VALUE_STATE)
+ return yaml_parser_parse_node(parser, event, false, false)
+ } else {
+ parser.state = yaml_PARSE_FLOW_MAPPING_VALUE_STATE
+ return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
+ }
+ } else if token.typ != yaml_FLOW_MAPPING_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE)
+ return yaml_parser_parse_node(parser, event, false, false)
+ }
+ }
+
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_END_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ }
+ skip_token(parser)
+ return true
+}
+
+// Parse the productions:
+// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+// * ***** *
+//
+func yaml_parser_parse_flow_mapping_value(parser *yaml_parser_t, event *yaml_event_t, empty bool) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if empty {
+ parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE
+ return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
+ }
+ if token.typ == yaml_VALUE_TOKEN {
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_MAPPING_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_KEY_STATE)
+ return yaml_parser_parse_node(parser, event, false, false)
+ }
+ }
+ parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE
+ return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
+}
+
+// Generate an empty scalar event.
+func yaml_parser_process_empty_scalar(parser *yaml_parser_t, event *yaml_event_t, mark yaml_mark_t) bool {
+ *event = yaml_event_t{
+ typ: yaml_SCALAR_EVENT,
+ start_mark: mark,
+ end_mark: mark,
+ value: nil, // Empty
+ implicit: true,
+ style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE),
+ }
+ return true
+}
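+
+// Illustration (a sketch): in the mapping
+//
+//      a:
+//      b: 1
+//
+// the missing value for "a" is synthesized by this function as a zero-length
+// plain scalar event positioned at the point where the value would have
+// started.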
+
+var default_tag_directives = []yaml_tag_directive_t{
+ {[]byte("!"), []byte("!")},
+ {[]byte("!!"), []byte("tag:yaml.org,2002:")},
+}
+
+// Parse directives.
+func yaml_parser_process_directives(parser *yaml_parser_t,
+ version_directive_ref **yaml_version_directive_t,
+ tag_directives_ref *[]yaml_tag_directive_t) bool {
+
+ var version_directive *yaml_version_directive_t
+ var tag_directives []yaml_tag_directive_t
+
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ for token.typ == yaml_VERSION_DIRECTIVE_TOKEN || token.typ == yaml_TAG_DIRECTIVE_TOKEN {
+ if token.typ == yaml_VERSION_DIRECTIVE_TOKEN {
+ if version_directive != nil {
+ yaml_parser_set_parser_error(parser,
+ "found duplicate %YAML directive", token.start_mark)
+ return false
+ }
+ if token.major != 1 || token.minor != 1 {
+ yaml_parser_set_parser_error(parser,
+ "found incompatible YAML document", token.start_mark)
+ return false
+ }
+ version_directive = &yaml_version_directive_t{
+ major: token.major,
+ minor: token.minor,
+ }
+ } else if token.typ == yaml_TAG_DIRECTIVE_TOKEN {
+ value := yaml_tag_directive_t{
+ handle: token.value,
+ prefix: token.prefix,
+ }
+ if !yaml_parser_append_tag_directive(parser, value, false, token.start_mark) {
+ return false
+ }
+ tag_directives = append(tag_directives, value)
+ }
+
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ }
+
+ for i := range default_tag_directives {
+ if !yaml_parser_append_tag_directive(parser, default_tag_directives[i], true, token.start_mark) {
+ return false
+ }
+ }
+
+ if version_directive_ref != nil {
+ *version_directive_ref = version_directive
+ }
+ if tag_directives_ref != nil {
+ *tag_directives_ref = tag_directives
+ }
+ return true
+}
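+
+// Example directive prologue consumed by this function (a sketch):
+//
+//      %YAML 1.1
+//      %TAG !e! tag:example.com,2000:
+//      ---
+//
+// After it runs, parser.tag_directives holds the "!e!" entry plus the two
+// default handles "!" and "!!", so a later node tagged !e!foo resolves to
+// "tag:example.com,2000:foo".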
+
+// Append a tag directive to the directives stack.
+func yaml_parser_append_tag_directive(parser *yaml_parser_t, value yaml_tag_directive_t, allow_duplicates bool, mark yaml_mark_t) bool {
+ for i := range parser.tag_directives {
+ if bytes.Equal(value.handle, parser.tag_directives[i].handle) {
+ if allow_duplicates {
+ return true
+ }
+ return yaml_parser_set_parser_error(parser, "found duplicate %TAG directive", mark)
+ }
+ }
+
+ // [Go] I suspect the copy is unnecessary. This was likely done
+ // because there was no way to track ownership of the data.
+ value_copy := yaml_tag_directive_t{
+ handle: make([]byte, len(value.handle)),
+ prefix: make([]byte, len(value.prefix)),
+ }
+ copy(value_copy.handle, value.handle)
+ copy(value_copy.prefix, value.prefix)
+ parser.tag_directives = append(parser.tag_directives, value_copy)
+ return true
+}
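+
+// Minimal event pull loop over this parser (a sketch; the package's decoder
+// wraps this with its own error handling):
+//
+//      var event yaml_event_t
+//      for {
+//              if !yaml_parser_parse(&parser, &event) {
+//                      break // parser.problem describes the failure
+//              }
+//              done := event.typ == yaml_STREAM_END_EVENT
+//              yaml_event_delete(&event)
+//              if done {
+//                      break
+//              }
+//      }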
diff --git a/vendor/gopkg.in/yaml.v2/readerc.go b/vendor/gopkg.in/yaml.v2/readerc.go
new file mode 100644
index 0000000000..7c1f5fac3d
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/readerc.go
@@ -0,0 +1,412 @@
+package yaml
+
+import (
+ "io"
+)
+
+// Set the reader error and return 0.
+func yaml_parser_set_reader_error(parser *yaml_parser_t, problem string, offset int, value int) bool {
+ parser.error = yaml_READER_ERROR
+ parser.problem = problem
+ parser.problem_offset = offset
+ parser.problem_value = value
+ return false
+}
+
+// Byte order marks.
+const (
+ bom_UTF8 = "\xef\xbb\xbf"
+ bom_UTF16LE = "\xff\xfe"
+ bom_UTF16BE = "\xfe\xff"
+)
+
+// Determine the input stream encoding by checking the BOM symbol. If no BOM is
+// found, the UTF-8 encoding is assumed. Return true on success, false on failure.
+func yaml_parser_determine_encoding(parser *yaml_parser_t) bool {
+ // Ensure that we have enough bytes in the raw buffer.
+ for !parser.eof && len(parser.raw_buffer)-parser.raw_buffer_pos < 3 {
+ if !yaml_parser_update_raw_buffer(parser) {
+ return false
+ }
+ }
+
+ // Determine the encoding.
+ buf := parser.raw_buffer
+ pos := parser.raw_buffer_pos
+ avail := len(buf) - pos
+ if avail >= 2 && buf[pos] == bom_UTF16LE[0] && buf[pos+1] == bom_UTF16LE[1] {
+ parser.encoding = yaml_UTF16LE_ENCODING
+ parser.raw_buffer_pos += 2
+ parser.offset += 2
+ } else if avail >= 2 && buf[pos] == bom_UTF16BE[0] && buf[pos+1] == bom_UTF16BE[1] {
+ parser.encoding = yaml_UTF16BE_ENCODING
+ parser.raw_buffer_pos += 2
+ parser.offset += 2
+ } else if avail >= 3 && buf[pos] == bom_UTF8[0] && buf[pos+1] == bom_UTF8[1] && buf[pos+2] == bom_UTF8[2] {
+ parser.encoding = yaml_UTF8_ENCODING
+ parser.raw_buffer_pos += 3
+ parser.offset += 3
+ } else {
+ parser.encoding = yaml_UTF8_ENCODING
+ }
+ return true
+}
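+
+// Illustration (a sketch): a stream beginning with the bytes 0xFF 0xFE is
+// taken as UTF-16LE and the two BOM bytes are consumed before decoding,
+// while a stream with no recognizable BOM falls through to UTF-8 with
+// nothing consumed.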
+
+// Update the raw buffer.
+func yaml_parser_update_raw_buffer(parser *yaml_parser_t) bool {
+ size_read := 0
+
+ // Return if the raw buffer is full.
+ if parser.raw_buffer_pos == 0 && len(parser.raw_buffer) == cap(parser.raw_buffer) {
+ return true
+ }
+
+ // Return on EOF.
+ if parser.eof {
+ return true
+ }
+
+ // Move the remaining bytes in the raw buffer to the beginning.
+ if parser.raw_buffer_pos > 0 && parser.raw_buffer_pos < len(parser.raw_buffer) {
+ copy(parser.raw_buffer, parser.raw_buffer[parser.raw_buffer_pos:])
+ }
+ parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)-parser.raw_buffer_pos]
+ parser.raw_buffer_pos = 0
+
+ // Call the read handler to fill the buffer.
+ size_read, err := parser.read_handler(parser, parser.raw_buffer[len(parser.raw_buffer):cap(parser.raw_buffer)])
+ parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)+size_read]
+ if err == io.EOF {
+ parser.eof = true
+ } else if err != nil {
+ return yaml_parser_set_reader_error(parser, "input error: "+err.Error(), parser.offset, -1)
+ }
+ return true
+}
+
+// Ensure that the buffer contains at least `length` characters.
+// Return true on success, false on failure.
+//
+// The length is supposed to be significantly less than the buffer size.
+func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool {
+ if parser.read_handler == nil {
+ panic("read handler must be set")
+ }
+
+ // [Go] This function was changed to guarantee the requested length size at EOF.
+ // The fact we need to do this is pretty awful, but the description above
+ // implies that this is the case, and there are tests that rely on it.
+
+ // If the EOF flag is set and the raw buffer is empty, do nothing.
+ if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) {
+ // [Go] ACTUALLY! Read the documentation of this function above.
+ // This is just broken. To return true, we need to have the
+ // given length in the buffer. Not doing that means every single
+ // check that calls this function to make sure the buffer has a
+ // given length would either panic (in Go) or access invalid memory (in C).
+ //return true
+ }
+
+ // Return if the buffer contains enough characters.
+ if parser.unread >= length {
+ return true
+ }
+
+ // Determine the input encoding if it is not known yet.
+ if parser.encoding == yaml_ANY_ENCODING {
+ if !yaml_parser_determine_encoding(parser) {
+ return false
+ }
+ }
+
+ // Move the unread characters to the beginning of the buffer.
+ buffer_len := len(parser.buffer)
+ if parser.buffer_pos > 0 && parser.buffer_pos < buffer_len {
+ copy(parser.buffer, parser.buffer[parser.buffer_pos:])
+ buffer_len -= parser.buffer_pos
+ parser.buffer_pos = 0
+ } else if parser.buffer_pos == buffer_len {
+ buffer_len = 0
+ parser.buffer_pos = 0
+ }
+
+ // Open the whole buffer for writing, and cut it before returning.
+ parser.buffer = parser.buffer[:cap(parser.buffer)]
+
+ // Fill the buffer until it has enough characters.
+ first := true
+ for parser.unread < length {
+
+ // Fill the raw buffer if necessary.
+ if !first || parser.raw_buffer_pos == len(parser.raw_buffer) {
+ if !yaml_parser_update_raw_buffer(parser) {
+ parser.buffer = parser.buffer[:buffer_len]
+ return false
+ }
+ }
+ first = false
+
+ // Decode the raw buffer.
+ inner:
+ for parser.raw_buffer_pos != len(parser.raw_buffer) {
+ var value rune
+ var width int
+
+ raw_unread := len(parser.raw_buffer) - parser.raw_buffer_pos
+
+ // Decode the next character.
+ switch parser.encoding {
+ case yaml_UTF8_ENCODING:
+ // Decode a UTF-8 character. Check RFC 3629
+ // (http://www.ietf.org/rfc/rfc3629.txt) for more details.
+ //
+ // The following table (taken from the RFC) is used for
+ // decoding.
+ //
+ // Char. number range | UTF-8 octet sequence
+ // (hexadecimal) | (binary)
+ // --------------------+------------------------------------
+ // 0000 0000-0000 007F | 0xxxxxxx
+ // 0000 0080-0000 07FF | 110xxxxx 10xxxxxx
+ // 0000 0800-0000 FFFF | 1110xxxx 10xxxxxx 10xxxxxx
+ // 0001 0000-0010 FFFF | 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
+ //
+ // Additionally, the characters in the range 0xD800-0xDFFF
+ // are prohibited as they are reserved for use with UTF-16
+ // surrogate pairs.
+
+ // Determine the length of the UTF-8 sequence.
+ octet := parser.raw_buffer[parser.raw_buffer_pos]
+ switch {
+ case octet&0x80 == 0x00:
+ width = 1
+ case octet&0xE0 == 0xC0:
+ width = 2
+ case octet&0xF0 == 0xE0:
+ width = 3
+ case octet&0xF8 == 0xF0:
+ width = 4
+ default:
+ // The leading octet is invalid.
+ return yaml_parser_set_reader_error(parser,
+ "invalid leading UTF-8 octet",
+ parser.offset, int(octet))
+ }
+
+ // Check if the raw buffer contains an incomplete character.
+ if width > raw_unread {
+ if parser.eof {
+ return yaml_parser_set_reader_error(parser,
+ "incomplete UTF-8 octet sequence",
+ parser.offset, -1)
+ }
+ break inner
+ }
+
+ // Decode the leading octet.
+ switch {
+ case octet&0x80 == 0x00:
+ value = rune(octet & 0x7F)
+ case octet&0xE0 == 0xC0:
+ value = rune(octet & 0x1F)
+ case octet&0xF0 == 0xE0:
+ value = rune(octet & 0x0F)
+ case octet&0xF8 == 0xF0:
+ value = rune(octet & 0x07)
+ default:
+ value = 0
+ }
+
+ // Check and decode the trailing octets.
+ for k := 1; k < width; k++ {
+ octet = parser.raw_buffer[parser.raw_buffer_pos+k]
+
+ // Check if the octet is valid.
+ if (octet & 0xC0) != 0x80 {
+ return yaml_parser_set_reader_error(parser,
+ "invalid trailing UTF-8 octet",
+ parser.offset+k, int(octet))
+ }
+
+ // Decode the octet.
+ value = (value << 6) + rune(octet&0x3F)
+ }
+
+ // Check the length of the sequence against the value.
+ switch {
+ case width == 1:
+ case width == 2 && value >= 0x80:
+ case width == 3 && value >= 0x800:
+ case width == 4 && value >= 0x10000:
+ default:
+ return yaml_parser_set_reader_error(parser,
+ "invalid length of a UTF-8 sequence",
+ parser.offset, -1)
+ }
+
+ // Check the range of the value.
+ if value >= 0xD800 && value <= 0xDFFF || value > 0x10FFFF {
+ return yaml_parser_set_reader_error(parser,
+ "invalid Unicode character",
+ parser.offset, int(value))
+ }
+
+ case yaml_UTF16LE_ENCODING, yaml_UTF16BE_ENCODING:
+ var low, high int
+ if parser.encoding == yaml_UTF16LE_ENCODING {
+ low, high = 0, 1
+ } else {
+ low, high = 1, 0
+ }
+
+ // The UTF-16 encoding is not as simple as one might
+ // naively think. Check RFC 2781
+ // (http://www.ietf.org/rfc/rfc2781.txt).
+ //
+ // Normally, two subsequent bytes describe a Unicode
+ // character. However a special technique (called a
+ // surrogate pair) is used for specifying character
+ // values larger than 0xFFFF.
+ //
+ // A surrogate pair consists of two pseudo-characters:
+ // high surrogate area (0xD800-0xDBFF)
+ // low surrogate area (0xDC00-0xDFFF)
+ //
+ // The following formulas are used for decoding
+ // and encoding characters using surrogate pairs:
+ //
+ // U = U' + 0x10000 (0x01 00 00 <= U <= 0x10 FF FF)
+ // U' = yyyyyyyyyyxxxxxxxxxx (0 <= U' <= 0x0F FF FF)
+ // W1 = 110110yyyyyyyyyy
+ // W2 = 110111xxxxxxxxxx
+ //
+ // where U is the character value, W1 is the high surrogate
+ // area, W2 is the low surrogate area.
+
+ // Check for incomplete UTF-16 character.
+ if raw_unread < 2 {
+ if parser.eof {
+ return yaml_parser_set_reader_error(parser,
+ "incomplete UTF-16 character",
+ parser.offset, -1)
+ }
+ break inner
+ }
+
+ // Get the character.
+ value = rune(parser.raw_buffer[parser.raw_buffer_pos+low]) +
+ (rune(parser.raw_buffer[parser.raw_buffer_pos+high]) << 8)
+
+ // Check for unexpected low surrogate area.
+ if value&0xFC00 == 0xDC00 {
+ return yaml_parser_set_reader_error(parser,
+ "unexpected low surrogate area",
+ parser.offset, int(value))
+ }
+
+ // Check for a high surrogate area.
+ if value&0xFC00 == 0xD800 {
+ width = 4
+
+ // Check for incomplete surrogate pair.
+ if raw_unread < 4 {
+ if parser.eof {
+ return yaml_parser_set_reader_error(parser,
+ "incomplete UTF-16 surrogate pair",
+ parser.offset, -1)
+ }
+ break inner
+ }
+
+ // Get the next character.
+ value2 := rune(parser.raw_buffer[parser.raw_buffer_pos+low+2]) +
+ (rune(parser.raw_buffer[parser.raw_buffer_pos+high+2]) << 8)
+
+ // Check for a low surrogate area.
+ if value2&0xFC00 != 0xDC00 {
+ return yaml_parser_set_reader_error(parser,
+ "expected low surrogate area",
+ parser.offset+2, int(value2))
+ }
+
+ // Generate the value of the surrogate pair.
+ value = 0x10000 + ((value & 0x3FF) << 10) + (value2 & 0x3FF)
+ } else {
+ width = 2
+ }
+
+ default:
+ panic("impossible")
+ }
+
+ // Check if the character is in the allowed range:
+ // #x9 | #xA | #xD | [#x20-#x7E] (8 bit)
+ // | #x85 | [#xA0-#xD7FF] | [#xE000-#xFFFD] (16 bit)
+ // | [#x10000-#x10FFFF] (32 bit)
+ switch {
+ case value == 0x09:
+ case value == 0x0A:
+ case value == 0x0D:
+ case value >= 0x20 && value <= 0x7E:
+ case value == 0x85:
+ case value >= 0xA0 && value <= 0xD7FF:
+ case value >= 0xE000 && value <= 0xFFFD:
+ case value >= 0x10000 && value <= 0x10FFFF:
+ default:
+ return yaml_parser_set_reader_error(parser,
+ "control characters are not allowed",
+ parser.offset, int(value))
+ }
+
+ // Move the raw pointers.
+ parser.raw_buffer_pos += width
+ parser.offset += width
+
+ // Finally put the character into the buffer.
+ if value <= 0x7F {
+ // 0000 0000-0000 007F . 0xxxxxxx
+ parser.buffer[buffer_len+0] = byte(value)
+ buffer_len += 1
+ } else if value <= 0x7FF {
+ // 0000 0080-0000 07FF . 110xxxxx 10xxxxxx
+ parser.buffer[buffer_len+0] = byte(0xC0 + (value >> 6))
+ parser.buffer[buffer_len+1] = byte(0x80 + (value & 0x3F))
+ buffer_len += 2
+ } else if value <= 0xFFFF {
+ // 0000 0800-0000 FFFF . 1110xxxx 10xxxxxx 10xxxxxx
+ parser.buffer[buffer_len+0] = byte(0xE0 + (value >> 12))
+ parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 6) & 0x3F))
+ parser.buffer[buffer_len+2] = byte(0x80 + (value & 0x3F))
+ buffer_len += 3
+ } else {
+ // 0001 0000-0010 FFFF . 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
+ parser.buffer[buffer_len+0] = byte(0xF0 + (value >> 18))
+ parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 12) & 0x3F))
+ parser.buffer[buffer_len+2] = byte(0x80 + ((value >> 6) & 0x3F))
+ parser.buffer[buffer_len+3] = byte(0x80 + (value & 0x3F))
+ buffer_len += 4
+ }
+
+ parser.unread++
+ }
+
+ // On EOF, put NUL into the buffer and return.
+ if parser.eof {
+ parser.buffer[buffer_len] = 0
+ buffer_len++
+ parser.unread++
+ break
+ }
+ }
+ // [Go] Read the documentation of this function above. To return true,
+ // we need to have the given length in the buffer. Not doing that means
+ // every single check that calls this function to make sure the buffer
+ // has a given length would either panic (in Go) or access invalid memory (in C).
+ // This happens here due to the EOF above breaking early.
+ for buffer_len < length {
+ parser.buffer[buffer_len] = 0
+ buffer_len++
+ }
+ parser.buffer = parser.buffer[:buffer_len]
+ return true
+}
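+
+// Consequence of the padding above (a sketch): for the two-byte stream "a:",
+// yaml_parser_update_buffer(parser, 4) still returns true at EOF and leaves
+//
+//      parser.buffer = []byte{'a', ':', 0, 0}
+//
+// so lookahead predicates such as is_blankz(parser.buffer, pos+1) can probe
+// past the real input without reslicing or bounds errors.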
diff --git a/vendor/gopkg.in/yaml.v2/resolve.go b/vendor/gopkg.in/yaml.v2/resolve.go
new file mode 100644
index 0000000000..4120e0c916
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/resolve.go
@@ -0,0 +1,258 @@
+package yaml
+
+import (
+ "encoding/base64"
+ "math"
+ "regexp"
+ "strconv"
+ "strings"
+ "time"
+)
+
+type resolveMapItem struct {
+ value interface{}
+ tag string
+}
+
+var resolveTable = make([]byte, 256)
+var resolveMap = make(map[string]resolveMapItem)
+
+func init() {
+ t := resolveTable
+ t[int('+')] = 'S' // Sign
+ t[int('-')] = 'S'
+ for _, c := range "0123456789" {
+ t[int(c)] = 'D' // Digit
+ }
+ for _, c := range "yYnNtTfFoO~" {
+ t[int(c)] = 'M' // In map
+ }
+ t[int('.')] = '.' // Float (potentially in map)
+
+ var resolveMapList = []struct {
+ v interface{}
+ tag string
+ l []string
+ }{
+ {true, yaml_BOOL_TAG, []string{"y", "Y", "yes", "Yes", "YES"}},
+ {true, yaml_BOOL_TAG, []string{"true", "True", "TRUE"}},
+ {true, yaml_BOOL_TAG, []string{"on", "On", "ON"}},
+ {false, yaml_BOOL_TAG, []string{"n", "N", "no", "No", "NO"}},
+ {false, yaml_BOOL_TAG, []string{"false", "False", "FALSE"}},
+ {false, yaml_BOOL_TAG, []string{"off", "Off", "OFF"}},
+ {nil, yaml_NULL_TAG, []string{"", "~", "null", "Null", "NULL"}},
+ {math.NaN(), yaml_FLOAT_TAG, []string{".nan", ".NaN", ".NAN"}},
+ {math.Inf(+1), yaml_FLOAT_TAG, []string{".inf", ".Inf", ".INF"}},
+ {math.Inf(+1), yaml_FLOAT_TAG, []string{"+.inf", "+.Inf", "+.INF"}},
+ {math.Inf(-1), yaml_FLOAT_TAG, []string{"-.inf", "-.Inf", "-.INF"}},
+ {"<<", yaml_MERGE_TAG, []string{"<<"}},
+ }
+
+ m := resolveMap
+ for _, item := range resolveMapList {
+ for _, s := range item.l {
+ m[s] = resolveMapItem{item.v, item.tag}
+ }
+ }
+}
+
+const longTagPrefix = "tag:yaml.org,2002:"
+
+func shortTag(tag string) string {
+ // TODO This can easily be made faster and produce less garbage.
+ if strings.HasPrefix(tag, longTagPrefix) {
+ return "!!" + tag[len(longTagPrefix):]
+ }
+ return tag
+}
+
+func longTag(tag string) string {
+ if strings.HasPrefix(tag, "!!") {
+ return longTagPrefix + tag[2:]
+ }
+ return tag
+}
+
+func resolvableTag(tag string) bool {
+ switch tag {
+ case "", yaml_STR_TAG, yaml_BOOL_TAG, yaml_INT_TAG, yaml_FLOAT_TAG, yaml_NULL_TAG, yaml_TIMESTAMP_TAG:
+ return true
+ }
+ return false
+}
+
+var yamlStyleFloat = regexp.MustCompile(`^[-+]?(\.[0-9]+|[0-9]+(\.[0-9]*)?)([eE][-+]?[0-9]+)?$`)
+
+func resolve(tag string, in string) (rtag string, out interface{}) {
+ if !resolvableTag(tag) {
+ return tag, in
+ }
+
+ defer func() {
+ switch tag {
+ case "", rtag, yaml_STR_TAG, yaml_BINARY_TAG:
+ return
+ case yaml_FLOAT_TAG:
+ if rtag == yaml_INT_TAG {
+ switch v := out.(type) {
+ case int64:
+ rtag = yaml_FLOAT_TAG
+ out = float64(v)
+ return
+ case int:
+ rtag = yaml_FLOAT_TAG
+ out = float64(v)
+ return
+ }
+ }
+ }
+ failf("cannot decode %s `%s` as a %s", shortTag(rtag), in, shortTag(tag))
+ }()
+
+ // Any data is accepted as a !!str or !!binary.
+ // Otherwise, the prefix is enough of a hint about what it might be.
+ hint := byte('N')
+ if in != "" {
+ hint = resolveTable[in[0]]
+ }
+ if hint != 0 && tag != yaml_STR_TAG && tag != yaml_BINARY_TAG {
+ // Handle things we can lookup in a map.
+ if item, ok := resolveMap[in]; ok {
+ return item.tag, item.value
+ }
+
+ // Base 60 floats are a bad idea, were dropped in YAML 1.2, and
+ // are purposefully unsupported here. They're still quoted on
+ // the way out for compatibility with other parsers, though.
+
+ switch hint {
+ case 'M':
+ // We've already checked the map above.
+
+ case '.':
+ // Not in the map, so maybe a normal float.
+ floatv, err := strconv.ParseFloat(in, 64)
+ if err == nil {
+ return yaml_FLOAT_TAG, floatv
+ }
+
+ case 'D', 'S':
+ // Int, float, or timestamp.
+ // Only try values as a timestamp if the value is unquoted or there's an explicit
+ // !!timestamp tag.
+ if tag == "" || tag == yaml_TIMESTAMP_TAG {
+ t, ok := parseTimestamp(in)
+ if ok {
+ return yaml_TIMESTAMP_TAG, t
+ }
+ }
+
+ plain := strings.Replace(in, "_", "", -1)
+ intv, err := strconv.ParseInt(plain, 0, 64)
+ if err == nil {
+ if intv == int64(int(intv)) {
+ return yaml_INT_TAG, int(intv)
+ } else {
+ return yaml_INT_TAG, intv
+ }
+ }
+ uintv, err := strconv.ParseUint(plain, 0, 64)
+ if err == nil {
+ return yaml_INT_TAG, uintv
+ }
+ if yamlStyleFloat.MatchString(plain) {
+ floatv, err := strconv.ParseFloat(plain, 64)
+ if err == nil {
+ return yaml_FLOAT_TAG, floatv
+ }
+ }
+ if strings.HasPrefix(plain, "0b") {
+ intv, err := strconv.ParseInt(plain[2:], 2, 64)
+ if err == nil {
+ if intv == int64(int(intv)) {
+ return yaml_INT_TAG, int(intv)
+ } else {
+ return yaml_INT_TAG, intv
+ }
+ }
+ uintv, err := strconv.ParseUint(plain[2:], 2, 64)
+ if err == nil {
+ return yaml_INT_TAG, uintv
+ }
+ } else if strings.HasPrefix(plain, "-0b") {
+ intv, err := strconv.ParseInt("-" + plain[3:], 2, 64)
+ if err == nil {
+ if true || intv == int64(int(intv)) {
+ return yaml_INT_TAG, int(intv)
+ } else {
+ return yaml_INT_TAG, intv
+ }
+ }
+ }
+ default:
+ panic("resolveTable item not yet handled: " + string(rune(hint)) + " (with " + in + ")")
+ }
+ }
+ return yaml_STR_TAG, in
+}
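+
+// Representative resolutions of untagged plain scalars (a sketch of the
+// logic above):
+//
+//      resolve("", "yes")    // -> !!bool  true   (via resolveMap)
+//      resolve("", "0b1010") // -> !!int   10
+//      resolve("", "3.5")    // -> !!float 3.5
+//      resolve("", "~")      // -> !!null  nil
+//      resolve("", "banana") // -> !!str   "banana" (hint byte is 0)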
+
+// encodeBase64 encodes s as base64 that is broken up into multiple lines
+// as appropriate for the resulting length.
+func encodeBase64(s string) string {
+ const lineLen = 70
+ encLen := base64.StdEncoding.EncodedLen(len(s))
+ lines := encLen/lineLen + 1
+ buf := make([]byte, encLen*2+lines)
+ in := buf[0:encLen]
+ out := buf[encLen:]
+ base64.StdEncoding.Encode(in, []byte(s))
+ k := 0
+ for i := 0; i < len(in); i += lineLen {
+ j := i + lineLen
+ if j > len(in) {
+ j = len(in)
+ }
+ k += copy(out[k:], in[i:j])
+ if lines > 1 {
+ out[k] = '\n'
+ k++
+ }
+ }
+ return string(out[:k])
+}
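+
+// Usage note (a sketch): encodeBase64 backs the !!binary representation on
+// output; the encoder calls it for strings that are not valid UTF-8, and the
+// 70-column line breaks keep the emitted block within YAML's recommended
+// width (see encode.go for the call site).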
+
+// This is a subset of the formats allowed by the regular expression
+// defined at http://yaml.org/type/timestamp.html.
+var allowedTimestampFormats = []string{
+ "2006-1-2T15:4:5.999999999Z07:00", // RCF3339Nano with short date fields.
+ "2006-1-2t15:4:5.999999999Z07:00", // RFC3339Nano with short date fields and lower-case "t".
+ "2006-1-2 15:4:5.999999999", // space separated with no time zone
+ "2006-1-2", // date only
+ // Notable exception: time.Parse cannot handle: "2001-12-14 21:59:43.10 -5"
+ // from the set of examples.
+}
+
+// parseTimestamp parses s as a timestamp string and
+// returns the timestamp and reports whether it succeeded.
+// Timestamp formats are defined at http://yaml.org/type/timestamp.html
+func parseTimestamp(s string) (time.Time, bool) {
+ // TODO write code to check all the formats supported by
+ // http://yaml.org/type/timestamp.html instead of using time.Parse.
+
+ // Quick check: all date formats start with YYYY-.
+ i := 0
+ for ; i < len(s); i++ {
+ if c := s[i]; c < '0' || c > '9' {
+ break
+ }
+ }
+ if i != 4 || i == len(s) || s[i] != '-' {
+ return time.Time{}, false
+ }
+ for _, format := range allowedTimestampFormats {
+ if t, err := time.Parse(format, s); err == nil {
+ return t, true
+ }
+ }
+ return time.Time{}, false
+}
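+
+// Examples against the quick check and format list above (a sketch):
+//
+//      parseTimestamp("2001-12-14")             // ok, date only
+//      parseTimestamp("2001-12-14 21:59:43.10") // ok, space separated
+//      parseTimestamp("14-12-2001")             // false: no "YYYY-" prefix
+//      parseTimestamp("2001")                   // false: digits reach end of string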
diff --git a/vendor/gopkg.in/yaml.v2/scannerc.go b/vendor/gopkg.in/yaml.v2/scannerc.go
new file mode 100644
index 0000000000..0b9bb6030a
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/scannerc.go
@@ -0,0 +1,2711 @@
+package yaml
+
+import (
+ "bytes"
+ "fmt"
+)
+
+// Introduction
+// ************
+//
+// The following notes assume that you are familiar with the YAML specification
+// (http://yaml.org/spec/1.2/spec.html). We mostly follow it, although in
+// some cases we are less restrictive than it requires.
+//
+// The process of transforming a YAML stream into a sequence of events is
+// divided into two steps: Scanning and Parsing.
+//
+// The Scanner transforms the input stream into a sequence of tokens, while the
+// Parser transforms the sequence of tokens produced by the Scanner into a
+// sequence of parsing events.
+//
+// The Scanner is rather clever and complicated. The Parser, on the contrary,
+// is a straightforward implementation of a recursive descent parser (or
+// LL(1) parser, as it is usually called).
+//
+// Actually, there are two aspects of Scanning that might be called "clever";
+// the rest is quite straightforward. These are "block collection start" and
+// "simple keys". Both are explained below in detail.
+//
+// Here the Scanning step is explained and implemented. We start with the list
+// of all the tokens produced by the Scanner together with short descriptions.
+//
+// Now, tokens:
+//
+// STREAM-START(encoding) # The stream start.
+// STREAM-END # The stream end.
+// VERSION-DIRECTIVE(major,minor) # The '%YAML' directive.
+// TAG-DIRECTIVE(handle,prefix) # The '%TAG' directive.
+// DOCUMENT-START # '---'
+// DOCUMENT-END # '...'
+// BLOCK-SEQUENCE-START # Indentation increase denoting a block
+// BLOCK-MAPPING-START # sequence or a block mapping.
+// BLOCK-END # Indentation decrease.
+// FLOW-SEQUENCE-START # '['
+// FLOW-SEQUENCE-END # ']'
+// FLOW-MAPPING-START # '{'
+// FLOW-MAPPING-END # '}'
+// BLOCK-ENTRY # '-'
+// FLOW-ENTRY # ','
+// KEY # '?' or nothing (simple keys).
+// VALUE # ':'
+// ALIAS(anchor) # '*anchor'
+// ANCHOR(anchor) # '&anchor'
+// TAG(handle,suffix) # '!handle!suffix'
+// SCALAR(value,style) # A scalar.
+//
+// The following two tokens are "virtual" tokens denoting the beginning and the
+// end of the stream:
+//
+// STREAM-START(encoding)
+// STREAM-END
+//
+// We pass the information about the input stream encoding with the
+// STREAM-START token.
+//
+// The next two tokens are responsible for tags:
+//
+// VERSION-DIRECTIVE(major,minor)
+// TAG-DIRECTIVE(handle,prefix)
+//
+// Example:
+//
+// %YAML 1.1
+// %TAG ! !foo
+// %TAG !yaml! tag:yaml.org,2002:
+// ---
+//
+// The corresponding sequence of tokens:
+//
+// STREAM-START(utf-8)
+// VERSION-DIRECTIVE(1,1)
+// TAG-DIRECTIVE("!","!foo")
+// TAG-DIRECTIVE("!yaml","tag:yaml.org,2002:")
+// DOCUMENT-START
+// STREAM-END
+//
+// Note that the VERSION-DIRECTIVE and TAG-DIRECTIVE tokens occupy a whole
+// line.
+//
+// The document start and end indicators are represented by:
+//
+// DOCUMENT-START
+// DOCUMENT-END
+//
+// Note that if a YAML stream contains an implicit document (without '---'
+// and '...' indicators), no DOCUMENT-START and DOCUMENT-END tokens will be
+// produced.
+//
+// In the following examples, we present whole documents together with the
+// produced tokens.
+//
+// 1. An implicit document:
+//
+// 'a scalar'
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// SCALAR("a scalar",single-quoted)
+// STREAM-END
+//
+// 2. An explicit document:
+//
+// ---
+// 'a scalar'
+// ...
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// DOCUMENT-START
+// SCALAR("a scalar",single-quoted)
+// DOCUMENT-END
+// STREAM-END
+//
+// 3. Several documents in a stream:
+//
+// 'a scalar'
+// ---
+// 'another scalar'
+// ---
+// 'yet another scalar'
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// SCALAR("a scalar",single-quoted)
+// DOCUMENT-START
+// SCALAR("another scalar",single-quoted)
+// DOCUMENT-START
+// SCALAR("yet another scalar",single-quoted)
+// STREAM-END
+//
+// We have already introduced the SCALAR token above. The following tokens are
+// used to describe aliases, anchors, tags, and scalars:
+//
+// ALIAS(anchor)
+// ANCHOR(anchor)
+// TAG(handle,suffix)
+// SCALAR(value,style)
+//
+// The following series of examples illustrates the usage of these tokens:
+//
+// 1. A recursive sequence:
+//
+// &A [ *A ]
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// ANCHOR("A")
+// FLOW-SEQUENCE-START
+// ALIAS("A")
+// FLOW-SEQUENCE-END
+// STREAM-END
+//
+// 2. A tagged scalar:
+//
+// !!float "3.14" # A good approximation.
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// TAG("!!","float")
+// SCALAR("3.14",double-quoted)
+// STREAM-END
+//
+// 3. Various scalar styles:
+//
+// --- # Implicit empty plain scalars do not produce tokens.
+// --- a plain scalar
+// --- 'a single-quoted scalar'
+// --- "a double-quoted scalar"
+// --- |-
+// a literal scalar
+// --- >-
+// a folded
+// scalar
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// DOCUMENT-START
+// DOCUMENT-START
+// SCALAR("a plain scalar",plain)
+// DOCUMENT-START
+// SCALAR("a single-quoted scalar",single-quoted)
+// DOCUMENT-START
+// SCALAR("a double-quoted scalar",double-quoted)
+// DOCUMENT-START
+// SCALAR("a literal scalar",literal)
+// DOCUMENT-START
+// SCALAR("a folded scalar",folded)
+// STREAM-END
+//
+// Now it's time to review collection-related tokens. We will start with
+// flow collections:
+//
+// FLOW-SEQUENCE-START
+// FLOW-SEQUENCE-END
+// FLOW-MAPPING-START
+// FLOW-MAPPING-END
+// FLOW-ENTRY
+// KEY
+// VALUE
+//
+// The tokens FLOW-SEQUENCE-START, FLOW-SEQUENCE-END, FLOW-MAPPING-START, and
+// FLOW-MAPPING-END represent the indicators '[', ']', '{', and '}'
+// respectively. FLOW-ENTRY represents the ',' indicator. Finally, the
+// indicators '?' and ':', which are used for denoting mapping keys and values,
+// are represented by the KEY and VALUE tokens.
+//
+// The following examples show flow collections:
+//
+// 1. A flow sequence:
+//
+// [item 1, item 2, item 3]
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// FLOW-SEQUENCE-START
+// SCALAR("item 1",plain)
+// FLOW-ENTRY
+// SCALAR("item 2",plain)
+// FLOW-ENTRY
+// SCALAR("item 3",plain)
+// FLOW-SEQUENCE-END
+// STREAM-END
+//
+// 2. A flow mapping:
+//
+// {
+// a simple key: a value, # Note that the KEY token is produced.
+// ? a complex key: another value,
+// }
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// FLOW-MAPPING-START
+// KEY
+// SCALAR("a simple key",plain)
+// VALUE
+// SCALAR("a value",plain)
+// FLOW-ENTRY
+// KEY
+// SCALAR("a complex key",plain)
+// VALUE
+// SCALAR("another value",plain)
+// FLOW-ENTRY
+// FLOW-MAPPING-END
+// STREAM-END
+//
+// A simple key is a key which is not denoted by the '?' indicator. Note that
+// the Scanner still produces the KEY token whenever it encounters a simple key.
+//
+// For scanning block collections, the following tokens are used (note that we
+// repeat KEY and VALUE here):
+//
+// BLOCK-SEQUENCE-START
+// BLOCK-MAPPING-START
+// BLOCK-END
+// BLOCK-ENTRY
+// KEY
+// VALUE
+//
+// The tokens BLOCK-SEQUENCE-START and BLOCK-MAPPING-START denote indentation
+// increase that precedes a block collection (cf. the INDENT token in Python).
+// The token BLOCK-END denotes an indentation decrease that ends a block collection
+// (cf. the DEDENT token in Python). However, YAML has some syntax peculiarities
+// that make detection of these tokens more complex.
+//
+// The tokens BLOCK-ENTRY, KEY, and VALUE are used to represent the indicators
+// '-', '?', and ':' respectively.
+//
+// The following examples show how the tokens BLOCK-SEQUENCE-START,
+// BLOCK-MAPPING-START, and BLOCK-END are emitted by the Scanner:
+//
+// 1. Block sequences:
+//
+// - item 1
+// - item 2
+// -
+// - item 3.1
+// - item 3.2
+// -
+// key 1: value 1
+// key 2: value 2
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// BLOCK-SEQUENCE-START
+// BLOCK-ENTRY
+// SCALAR("item 1",plain)
+// BLOCK-ENTRY
+// SCALAR("item 2",plain)
+// BLOCK-ENTRY
+// BLOCK-SEQUENCE-START
+// BLOCK-ENTRY
+// SCALAR("item 3.1",plain)
+// BLOCK-ENTRY
+// SCALAR("item 3.2",plain)
+// BLOCK-END
+// BLOCK-ENTRY
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("key 1",plain)
+// VALUE
+// SCALAR("value 1",plain)
+// KEY
+// SCALAR("key 2",plain)
+// VALUE
+// SCALAR("value 2",plain)
+// BLOCK-END
+// BLOCK-END
+// STREAM-END
+//
+// 2. Block mappings:
+//
+// a simple key: a value # The KEY token is produced here.
+// ? a complex key
+// : another value
+// a mapping:
+// key 1: value 1
+// key 2: value 2
+// a sequence:
+// - item 1
+// - item 2
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("a simple key",plain)
+// VALUE
+// SCALAR("a value",plain)
+// KEY
+// SCALAR("a complex key",plain)
+// VALUE
+// SCALAR("another value",plain)
+// KEY
+// SCALAR("a mapping",plain)
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("key 1",plain)
+// VALUE
+// SCALAR("value 1",plain)
+// KEY
+// SCALAR("key 2",plain)
+// VALUE
+// SCALAR("value 2",plain)
+// BLOCK-END
+// KEY
+// SCALAR("a sequence",plain)
+// VALUE
+// BLOCK-SEQUENCE-START
+// BLOCK-ENTRY
+// SCALAR("item 1",plain)
+// BLOCK-ENTRY
+// SCALAR("item 2",plain)
+// BLOCK-END
+// BLOCK-END
+// STREAM-END
+//
+// YAML does not always require a new block collection to start on a new
+// line. If the current line contains only '-', '?', and ':' indicators, a new
+// block collection may start at the current line. The following examples
+// illustrate this case:
+//
+// 1. Collections in a sequence:
+//
+// - - item 1
+// - item 2
+// - key 1: value 1
+// key 2: value 2
+// - ? complex key
+// : complex value
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// BLOCK-SEQUENCE-START
+// BLOCK-ENTRY
+// BLOCK-SEQUENCE-START
+// BLOCK-ENTRY
+// SCALAR("item 1",plain)
+// BLOCK-ENTRY
+// SCALAR("item 2",plain)
+// BLOCK-END
+// BLOCK-ENTRY
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("key 1",plain)
+// VALUE
+// SCALAR("value 1",plain)
+// KEY
+// SCALAR("key 2",plain)
+// VALUE
+// SCALAR("value 2",plain)
+// BLOCK-END
+// BLOCK-ENTRY
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("complex key")
+// VALUE
+// SCALAR("complex value")
+// BLOCK-END
+// BLOCK-END
+// STREAM-END
+//
+// 2. Collections in a mapping:
+//
+// ? a sequence
+// : - item 1
+// - item 2
+// ? a mapping
+// : key 1: value 1
+// key 2: value 2
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("a sequence",plain)
+// VALUE
+// BLOCK-SEQUENCE-START
+// BLOCK-ENTRY
+// SCALAR("item 1",plain)
+// BLOCK-ENTRY
+// SCALAR("item 2",plain)
+// BLOCK-END
+// KEY
+// SCALAR("a mapping",plain)
+// VALUE
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("key 1",plain)
+// VALUE
+// SCALAR("value 1",plain)
+// KEY
+// SCALAR("key 2",plain)
+// VALUE
+// SCALAR("value 2",plain)
+// BLOCK-END
+// BLOCK-END
+// STREAM-END
+//
+// YAML also permits non-indented sequences if they are included in a block
+// mapping. In this case, the token BLOCK-SEQUENCE-START is not produced:
+//
+// key:
+// - item 1 # BLOCK-SEQUENCE-START is NOT produced here.
+// - item 2
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("key",plain)
+// VALUE
+// BLOCK-ENTRY
+// SCALAR("item 1",plain)
+// BLOCK-ENTRY
+// SCALAR("item 2",plain)
+// BLOCK-END
+//
+
+// Ensure that the buffer contains the required number of characters.
+// Return true on success, false on failure (reader error or memory error).
+func cache(parser *yaml_parser_t, length int) bool {
+ // [Go] This was inlined: !cache(A, B) -> unread < B && !update(A, B)
+ return parser.unread >= length || yaml_parser_update_buffer(parser, length)
+}
+
+// Advance the buffer pointer.
+func skip(parser *yaml_parser_t) {
+ parser.mark.index++
+ parser.mark.column++
+ parser.unread--
+ parser.buffer_pos += width(parser.buffer[parser.buffer_pos])
+}
+
+func skip_line(parser *yaml_parser_t) {
+ if is_crlf(parser.buffer, parser.buffer_pos) {
+ parser.mark.index += 2
+ parser.mark.column = 0
+ parser.mark.line++
+ parser.unread -= 2
+ parser.buffer_pos += 2
+ } else if is_break(parser.buffer, parser.buffer_pos) {
+ parser.mark.index++
+ parser.mark.column = 0
+ parser.mark.line++
+ parser.unread--
+ parser.buffer_pos += width(parser.buffer[parser.buffer_pos])
+ }
+}
+
+// Copy a character to a string buffer and advance pointers.
+func read(parser *yaml_parser_t, s []byte) []byte {
+ w := width(parser.buffer[parser.buffer_pos])
+ if w == 0 {
+ panic("invalid character sequence")
+ }
+ if len(s) == 0 {
+ s = make([]byte, 0, 32)
+ }
+ if w == 1 && len(s)+w <= cap(s) {
+ s = s[:len(s)+1]
+ s[len(s)-1] = parser.buffer[parser.buffer_pos]
+ parser.buffer_pos++
+ } else {
+ s = append(s, parser.buffer[parser.buffer_pos:parser.buffer_pos+w]...)
+ parser.buffer_pos += w
+ }
+ parser.mark.index++
+ parser.mark.column++
+ parser.unread--
+ return s
+}
+
+// Copy a line break character to a string buffer and advance pointers.
+func read_line(parser *yaml_parser_t, s []byte) []byte {
+ buf := parser.buffer
+ pos := parser.buffer_pos
+ switch {
+ case buf[pos] == '\r' && buf[pos+1] == '\n':
+ // CR LF . LF
+ s = append(s, '\n')
+ parser.buffer_pos += 2
+ parser.mark.index++
+ parser.unread--
+ case buf[pos] == '\r' || buf[pos] == '\n':
+ // CR|LF . LF
+ s = append(s, '\n')
+ parser.buffer_pos += 1
+ case buf[pos] == '\xC2' && buf[pos+1] == '\x85':
+ // NEL . LF
+ s = append(s, '\n')
+ parser.buffer_pos += 2
+ case buf[pos] == '\xE2' && buf[pos+1] == '\x80' && (buf[pos+2] == '\xA8' || buf[pos+2] == '\xA9'):
+ // LS|PS . LS|PS
+ s = append(s, buf[parser.buffer_pos:pos+3]...)
+ parser.buffer_pos += 3
+ default:
+ return s
+ }
+ parser.mark.index++
+ parser.mark.column = 0
+ parser.mark.line++
+ parser.unread--
+ return s
+}
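+
+// Normalizations performed above (a sketch):
+//
+//      "\r\n"         -> "\n"    // CR LF collapses to a single LF
+//      "\r"           -> "\n"    // bare CR becomes LF
+//      "\xC2\x85"     -> "\n"    // NEL becomes LF
+//      "\xE2\x80\xA8" -> itself  // LS (and PS) are copied through verbatim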
+
+// Get the next token.
+func yaml_parser_scan(parser *yaml_parser_t, token *yaml_token_t) bool {
+ // Erase the token object.
+ *token = yaml_token_t{} // [Go] Is this necessary?
+
+ // No tokens after STREAM-END or error.
+ if parser.stream_end_produced || parser.error != yaml_NO_ERROR {
+ return true
+ }
+
+ // Ensure that the tokens queue contains enough tokens.
+ if !parser.token_available {
+ if !yaml_parser_fetch_more_tokens(parser) {
+ return false
+ }
+ }
+
+ // Fetch the next token from the queue.
+ *token = parser.tokens[parser.tokens_head]
+ parser.tokens_head++
+ parser.tokens_parsed++
+ parser.token_available = false
+
+ if token.typ == yaml_STREAM_END_TOKEN {
+ parser.stream_end_produced = true
+ }
+ return true
+}
+
+// Set the scanner error and return false.
+func yaml_parser_set_scanner_error(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string) bool {
+ parser.error = yaml_SCANNER_ERROR
+ parser.context = context
+ parser.context_mark = context_mark
+ parser.problem = problem
+ parser.problem_mark = parser.mark
+ return false
+}
+
+func yaml_parser_set_scanner_tag_error(parser *yaml_parser_t, directive bool, context_mark yaml_mark_t, problem string) bool {
+ context := "while parsing a tag"
+ if directive {
+ context = "while parsing a %TAG directive"
+ }
+ return yaml_parser_set_scanner_error(parser, context, context_mark, problem)
+}
+
+func trace(args ...interface{}) func() {
+ pargs := append([]interface{}{"+++"}, args...)
+ fmt.Println(pargs...)
+ pargs = append([]interface{}{"---"}, args...)
+ return func() { fmt.Println(pargs...) }
+}
+
+// Ensure that the tokens queue contains at least one token which can be
+// returned to the Parser.
+func yaml_parser_fetch_more_tokens(parser *yaml_parser_t) bool {
+ // While we need more tokens to fetch, do it.
+ for {
+ if parser.tokens_head != len(parser.tokens) {
+ // If queue is non-empty, check if any potential simple key may
+ // occupy the head position.
+ head_tok_idx, ok := parser.simple_keys_by_tok[parser.tokens_parsed]
+ if !ok {
+ break
+ } else if valid, ok := yaml_simple_key_is_valid(parser, &parser.simple_keys[head_tok_idx]); !ok {
+ return false
+ } else if !valid {
+ break
+ }
+ }
+ // Fetch the next token.
+ if !yaml_parser_fetch_next_token(parser) {
+ return false
+ }
+ }
+
+ parser.token_available = true
+ return true
+}
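+
+// Why the validity check above is needed (a sketch): in "a: 1" the scanner
+// only learns that the scalar "a" was a mapping key when it later sees the
+// ':' indicator, so a token at the head of the queue that might still become
+// a KEY cannot be released to the parser until the pending simple key is
+// either confirmed or invalidated.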
+
+// The dispatcher for token fetchers.
+func yaml_parser_fetch_next_token(parser *yaml_parser_t) bool {
+ // Ensure that the buffer is initialized.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ // Check if we just started scanning. Fetch STREAM-START then.
+ if !parser.stream_start_produced {
+ return yaml_parser_fetch_stream_start(parser)
+ }
+
+ // Eat whitespaces and comments until we reach the next token.
+ if !yaml_parser_scan_to_next_token(parser) {
+ return false
+ }
+
+ // Check the indentation level against the current column.
+ if !yaml_parser_unroll_indent(parser, parser.mark.column) {
+ return false
+ }
+
+ // Ensure that the buffer contains at least 4 characters. 4 is the length
+ // of the longest indicators ('--- ' and '... ').
+ if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) {
+ return false
+ }
+
+ // Is it the end of the stream?
+ if is_z(parser.buffer, parser.buffer_pos) {
+ return yaml_parser_fetch_stream_end(parser)
+ }
+
+ // Is it a directive?
+ if parser.mark.column == 0 && parser.buffer[parser.buffer_pos] == '%' {
+ return yaml_parser_fetch_directive(parser)
+ }
+
+ buf := parser.buffer
+ pos := parser.buffer_pos
+
+ // Is it the document start indicator?
+ if parser.mark.column == 0 && buf[pos] == '-' && buf[pos+1] == '-' && buf[pos+2] == '-' && is_blankz(buf, pos+3) {
+ return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_START_TOKEN)
+ }
+
+ // Is it the document end indicator?
+ if parser.mark.column == 0 && buf[pos] == '.' && buf[pos+1] == '.' && buf[pos+2] == '.' && is_blankz(buf, pos+3) {
+ return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_END_TOKEN)
+ }
+
+ // Is it the flow sequence start indicator?
+ if buf[pos] == '[' {
+ return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_SEQUENCE_START_TOKEN)
+ }
+
+ // Is it the flow mapping start indicator?
+ if parser.buffer[parser.buffer_pos] == '{' {
+ return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_MAPPING_START_TOKEN)
+ }
+
+ // Is it the flow sequence end indicator?
+ if parser.buffer[parser.buffer_pos] == ']' {
+ return yaml_parser_fetch_flow_collection_end(parser,
+ yaml_FLOW_SEQUENCE_END_TOKEN)
+ }
+
+ // Is it the flow mapping end indicator?
+ if parser.buffer[parser.buffer_pos] == '}' {
+ return yaml_parser_fetch_flow_collection_end(parser,
+ yaml_FLOW_MAPPING_END_TOKEN)
+ }
+
+ // Is it the flow entry indicator?
+ if parser.buffer[parser.buffer_pos] == ',' {
+ return yaml_parser_fetch_flow_entry(parser)
+ }
+
+ // Is it the block entry indicator?
+ if parser.buffer[parser.buffer_pos] == '-' && is_blankz(parser.buffer, parser.buffer_pos+1) {
+ return yaml_parser_fetch_block_entry(parser)
+ }
+
+ // Is it the key indicator?
+ if parser.buffer[parser.buffer_pos] == '?' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) {
+ return yaml_parser_fetch_key(parser)
+ }
+
+ // Is it the value indicator?
+ if parser.buffer[parser.buffer_pos] == ':' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) {
+ return yaml_parser_fetch_value(parser)
+ }
+
+ // Is it an alias?
+ if parser.buffer[parser.buffer_pos] == '*' {
+ return yaml_parser_fetch_anchor(parser, yaml_ALIAS_TOKEN)
+ }
+
+ // Is it an anchor?
+ if parser.buffer[parser.buffer_pos] == '&' {
+ return yaml_parser_fetch_anchor(parser, yaml_ANCHOR_TOKEN)
+ }
+
+ // Is it a tag?
+ if parser.buffer[parser.buffer_pos] == '!' {
+ return yaml_parser_fetch_tag(parser)
+ }
+
+ // Is it a literal scalar?
+ if parser.buffer[parser.buffer_pos] == '|' && parser.flow_level == 0 {
+ return yaml_parser_fetch_block_scalar(parser, true)
+ }
+
+ // Is it a folded scalar?
+ if parser.buffer[parser.buffer_pos] == '>' && parser.flow_level == 0 {
+ return yaml_parser_fetch_block_scalar(parser, false)
+ }
+
+ // Is it a single-quoted scalar?
+ if parser.buffer[parser.buffer_pos] == '\'' {
+ return yaml_parser_fetch_flow_scalar(parser, true)
+ }
+
+ // Is it a double-quoted scalar?
+ if parser.buffer[parser.buffer_pos] == '"' {
+ return yaml_parser_fetch_flow_scalar(parser, false)
+ }
+
+ // Is it a plain scalar?
+ //
+ // A plain scalar may start with any non-blank characters except
+ //
+ // '-', '?', ':', ',', '[', ']', '{', '}',
+ // '#', '&', '*', '!', '|', '>', '\'', '\"',
+ // '%', '@', '`'.
+ //
+ // In the block context (and, for the '-' indicator, in the flow context
+ // too), it may also start with the characters
+ //
+ // '-', '?', ':'
+ //
+ // if it is followed by a non-space character.
+ //
+ // The last rule is more restrictive than the specification requires.
+ // [Go] Make this logic more reasonable.
+ //switch parser.buffer[parser.buffer_pos] {
+ //case '-', '?', ':', ',', '?', '-', ',', ':', ']', '[', '}', '{', '&', '#', '!', '*', '>', '|', '"', '\'', '@', '%', '-', '`':
+ //}
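+ // Illustrative examples of these rules: "-1" and ":port" start plain
+ // scalars in the block context because the indicator is followed by a
+ // non-blank character, while "- 1" was already dispatched above as a
+ // block entry.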
+ if !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '-' ||
+ parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':' ||
+ parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '[' ||
+ parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' ||
+ parser.buffer[parser.buffer_pos] == '}' || parser.buffer[parser.buffer_pos] == '#' ||
+ parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '*' ||
+ parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '|' ||
+ parser.buffer[parser.buffer_pos] == '>' || parser.buffer[parser.buffer_pos] == '\'' ||
+ parser.buffer[parser.buffer_pos] == '"' || parser.buffer[parser.buffer_pos] == '%' ||
+ parser.buffer[parser.buffer_pos] == '@' || parser.buffer[parser.buffer_pos] == '`') ||
+ (parser.buffer[parser.buffer_pos] == '-' && !is_blank(parser.buffer, parser.buffer_pos+1)) ||
+ (parser.flow_level == 0 &&
+ (parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':') &&
+ !is_blankz(parser.buffer, parser.buffer_pos+1)) {
+ return yaml_parser_fetch_plain_scalar(parser)
+ }
+
+ // If we haven't determined the token type so far, it is an error.
+ return yaml_parser_set_scanner_error(parser,
+ "while scanning for the next token", parser.mark,
+ "found character that cannot start any token")
+}
+
+func yaml_simple_key_is_valid(parser *yaml_parser_t, simple_key *yaml_simple_key_t) (valid, ok bool) {
+ if !simple_key.possible {
+ return false, true
+ }
+
+ // The 1.2 specification says:
+ //
+ // "If the ? indicator is omitted, parsing needs to see past the
+ // implicit key to recognize it as such. To limit the amount of
+ // lookahead required, the “:” indicator must appear at most 1024
+ // Unicode characters beyond the start of the key. In addition, the key
+ // is restricted to a single line."
+ //
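+ // For example, in "foo: bar" the potential key "foo" remains valid
+ // because its ':' appears on the same line, well within the 1024-character
+ // window; a candidate whose ':' only shows up on a later line is
+ // invalidated below.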
+ if simple_key.mark.line < parser.mark.line || simple_key.mark.index+1024 < parser.mark.index {
+ // Check if the potential simple key to be removed is required.
+ if simple_key.required {
+ return false, yaml_parser_set_scanner_error(parser,
+ "while scanning a simple key", simple_key.mark,
+ "could not find expected ':'")
+ }
+ simple_key.possible = false
+ return false, true
+ }
+ return true, true
+}
+
+// Check if a simple key may start at the current position and add it if
+// needed.
+func yaml_parser_save_simple_key(parser *yaml_parser_t) bool {
+ // A simple key is required at the current position if the scanner is in
+ // the block context and the current column coincides with the indentation
+ // level.
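+ //
+ // For example, once a block mapping is open at column 0, any scalar
+ // starting at column 0 must itself be a key, so the simple key saved there
+ // is required: if no ':' follows it, scanning fails with "could not find
+ // expected ':'".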
+
+ required := parser.flow_level == 0 && parser.indent == parser.mark.column
+
+ // If the current position may start a simple key, save it.
+ if parser.simple_key_allowed {
+ simple_key := yaml_simple_key_t{
+ possible: true,
+ required: required,
+ token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head),
+ mark: parser.mark,
+ }
+
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+ parser.simple_keys[len(parser.simple_keys)-1] = simple_key
+ parser.simple_keys_by_tok[simple_key.token_number] = len(parser.simple_keys) - 1
+ }
+ return true
+}
+
+// Remove a potential simple key at the current flow level.
+func yaml_parser_remove_simple_key(parser *yaml_parser_t) bool {
+ i := len(parser.simple_keys) - 1
+ if parser.simple_keys[i].possible {
+ // If the key is required, it is an error.
+ if parser.simple_keys[i].required {
+ return yaml_parser_set_scanner_error(parser,
+ "while scanning a simple key", parser.simple_keys[i].mark,
+ "could not find expected ':'")
+ }
+ // Remove the key from the stack.
+ parser.simple_keys[i].possible = false
+ delete(parser.simple_keys_by_tok, parser.simple_keys[i].token_number)
+ }
+ return true
+}
+
+// max_flow_level limits the flow_level
+const max_flow_level = 10000
+
+// Increase the flow level and resize the simple key list if needed.
+func yaml_parser_increase_flow_level(parser *yaml_parser_t) bool {
+ // Reset the simple key on the next level.
+ parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{
+ possible: false,
+ required: false,
+ token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head),
+ mark: parser.mark,
+ })
+
+ // Increase the flow level.
+ parser.flow_level++
+ if parser.flow_level > max_flow_level {
+ return yaml_parser_set_scanner_error(parser,
+ "while increasing flow level", parser.simple_keys[len(parser.simple_keys)-1].mark,
+ fmt.Sprintf("exceeded max depth of %d", max_flow_level))
+ }
+ return true
+}
+
+// Decrease the flow level.
+func yaml_parser_decrease_flow_level(parser *yaml_parser_t) bool {
+ if parser.flow_level > 0 {
+ parser.flow_level--
+ last := len(parser.simple_keys) - 1
+ delete(parser.simple_keys_by_tok, parser.simple_keys[last].token_number)
+ parser.simple_keys = parser.simple_keys[:last]
+ }
+ return true
+}
+
+// max_indents limits the indents stack size
+const max_indents = 10000
+
+// Push the current indentation level to the stack and set the new level if
+// the current column is greater than the indentation level. In this case,
+// append or insert the specified token into the token queue.
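+//
+// Illustrative example: on the first "- " of
+//
+//     - a
+//     - b
+//
+// parser.indent is -1 and the '-' sits at column 0, so the level -1 is
+// pushed, the indent becomes 0, and a BLOCK-SEQUENCE-START token is inserted
+// ahead of the first BLOCK-ENTRY token.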
+func yaml_parser_roll_indent(parser *yaml_parser_t, column, number int, typ yaml_token_type_t, mark yaml_mark_t) bool {
+ // In the flow context, do nothing.
+ if parser.flow_level > 0 {
+ return true
+ }
+
+ if parser.indent < column {
+ // Push the current indentation level to the stack and set the new
+ // indentation level.
+ parser.indents = append(parser.indents, parser.indent)
+ parser.indent = column
+ if len(parser.indents) > max_indents {
+ return yaml_parser_set_scanner_error(parser,
+ "while increasing indent level", parser.simple_keys[len(parser.simple_keys)-1].mark,
+ fmt.Sprintf("exceeded max depth of %d", max_indents))
+ }
+
+ // Create a token and insert it into the queue.
+ token := yaml_token_t{
+ typ: typ,
+ start_mark: mark,
+ end_mark: mark,
+ }
+ if number > -1 {
+ number -= parser.tokens_parsed
+ }
+ yaml_insert_token(parser, number, &token)
+ }
+ return true
+}
+
+// Pop indentation levels from the indents stack until the current level
+// becomes less or equal to the column. For each indentation level, append
+// the BLOCK-END token.
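+//
+// Illustrative example: at the end of
+//
+//     a:
+//       b: 1
+//
+// unrolling to column -1 pops the indents 2 and 0, appending one BLOCK-END
+// token for each block mapping being closed.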
+func yaml_parser_unroll_indent(parser *yaml_parser_t, column int) bool {
+ // In the flow context, do nothing.
+ if parser.flow_level > 0 {
+ return true
+ }
+
+ // Loop through the indentation levels in the stack.
+ for parser.indent > column {
+ // Create a token and append it to the queue.
+ token := yaml_token_t{
+ typ: yaml_BLOCK_END_TOKEN,
+ start_mark: parser.mark,
+ end_mark: parser.mark,
+ }
+ yaml_insert_token(parser, -1, &token)
+
+ // Pop the indentation level.
+ parser.indent = parser.indents[len(parser.indents)-1]
+ parser.indents = parser.indents[:len(parser.indents)-1]
+ }
+ return true
+}
+
+// Initialize the scanner and produce the STREAM-START token.
+func yaml_parser_fetch_stream_start(parser *yaml_parser_t) bool {
+
+ // Set the initial indentation.
+ parser.indent = -1
+
+ // Initialize the simple key stack.
+ parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{})
+
+ parser.simple_keys_by_tok = make(map[int]int)
+
+ // A simple key is allowed at the beginning of the stream.
+ parser.simple_key_allowed = true
+
+ // We have started.
+ parser.stream_start_produced = true
+
+ // Create the STREAM-START token and append it to the queue.
+ token := yaml_token_t{
+ typ: yaml_STREAM_START_TOKEN,
+ start_mark: parser.mark,
+ end_mark: parser.mark,
+ encoding: parser.encoding,
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the STREAM-END token and shut down the scanner.
+func yaml_parser_fetch_stream_end(parser *yaml_parser_t) bool {
+
+ // Force new line.
+ if parser.mark.column != 0 {
+ parser.mark.column = 0
+ parser.mark.line++
+ }
+
+ // Reset the indentation level.
+ if !yaml_parser_unroll_indent(parser, -1) {
+ return false
+ }
+
+ // Reset simple keys.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ parser.simple_key_allowed = false
+
+ // Create the STREAM-END token and append it to the queue.
+ token := yaml_token_t{
+ typ: yaml_STREAM_END_TOKEN,
+ start_mark: parser.mark,
+ end_mark: parser.mark,
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce a VERSION-DIRECTIVE or TAG-DIRECTIVE token.
+func yaml_parser_fetch_directive(parser *yaml_parser_t) bool {
+ // Reset the indentation level.
+ if !yaml_parser_unroll_indent(parser, -1) {
+ return false
+ }
+
+ // Reset simple keys.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ parser.simple_key_allowed = false
+
+ // Create the YAML-DIRECTIVE or TAG-DIRECTIVE token.
+ token := yaml_token_t{}
+ if !yaml_parser_scan_directive(parser, &token) {
+ return false
+ }
+ // Append the token to the queue.
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the DOCUMENT-START or DOCUMENT-END token.
+func yaml_parser_fetch_document_indicator(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+ // Reset the indentation level.
+ if !yaml_parser_unroll_indent(parser, -1) {
+ return false
+ }
+
+ // Reset simple keys.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ parser.simple_key_allowed = false
+
+ // Consume the token.
+ start_mark := parser.mark
+
+ skip(parser)
+ skip(parser)
+ skip(parser)
+
+ end_mark := parser.mark
+
+ // Create the DOCUMENT-START or DOCUMENT-END token.
+ token := yaml_token_t{
+ typ: typ,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ }
+ // Append the token to the queue.
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the FLOW-SEQUENCE-START or FLOW-MAPPING-START token.
+func yaml_parser_fetch_flow_collection_start(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+ // The indicators '[' and '{' may start a simple key.
+ if !yaml_parser_save_simple_key(parser) {
+ return false
+ }
+
+ // Increase the flow level.
+ if !yaml_parser_increase_flow_level(parser) {
+ return false
+ }
+
+ // A simple key may follow the indicators '[' and '{'.
+ parser.simple_key_allowed = true
+
+ // Consume the token.
+ start_mark := parser.mark
+ skip(parser)
+ end_mark := parser.mark
+
+ // Create the FLOW-SEQUENCE-START or FLOW-MAPPING-START token.
+ token := yaml_token_t{
+ typ: typ,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ }
+ // Append the token to the queue.
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the FLOW-SEQUENCE-END or FLOW-MAPPING-END token.
+func yaml_parser_fetch_flow_collection_end(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+ // Reset any potential simple key on the current flow level.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ // Decrease the flow level.
+ if !yaml_parser_decrease_flow_level(parser) {
+ return false
+ }
+
+ // No simple keys after the indicators ']' and '}'.
+ parser.simple_key_allowed = false
+
+ // Consume the token.
+ start_mark := parser.mark
+ skip(parser)
+ end_mark := parser.mark
+
+ // Create the FLOW-SEQUENCE-END or FLOW-MAPPING-END token.
+ token := yaml_token_t{
+ typ: typ,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ }
+ // Append the token to the queue.
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the FLOW-ENTRY token.
+func yaml_parser_fetch_flow_entry(parser *yaml_parser_t) bool {
+ // Reset any potential simple keys on the current flow level.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ // Simple keys are allowed after ','.
+ parser.simple_key_allowed = true
+
+ // Consume the token.
+ start_mark := parser.mark
+ skip(parser)
+ end_mark := parser.mark
+
+ // Create the FLOW-ENTRY token and append it to the queue.
+ token := yaml_token_t{
+ typ: yaml_FLOW_ENTRY_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the BLOCK-ENTRY token.
+func yaml_parser_fetch_block_entry(parser *yaml_parser_t) bool {
+ // Check if the scanner is in the block context.
+ if parser.flow_level == 0 {
+ // Check if we are allowed to start a new entry.
+ if !parser.simple_key_allowed {
+ return yaml_parser_set_scanner_error(parser, "", parser.mark,
+ "block sequence entries are not allowed in this context")
+ }
+ // Add the BLOCK-SEQUENCE-START token if needed.
+ if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_SEQUENCE_START_TOKEN, parser.mark) {
+ return false
+ }
+ } else {
+ // It is an error for the '-' indicator to occur in the flow context,
+ // but we let the Parser detect and report it, because the Parser
+ // is able to point to the context.
+ }
+
+ // Reset any potential simple keys on the current flow level.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ // Simple keys are allowed after '-'.
+ parser.simple_key_allowed = true
+
+ // Consume the token.
+ start_mark := parser.mark
+ skip(parser)
+ end_mark := parser.mark
+
+ // Create the BLOCK-ENTRY token and append it to the queue.
+ token := yaml_token_t{
+ typ: yaml_BLOCK_ENTRY_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the KEY token.
+func yaml_parser_fetch_key(parser *yaml_parser_t) bool {
+
+ // In the block context, additional checks are required.
+ if parser.flow_level == 0 {
+ // Check if we are allowed to start a new key (not necessarily simple).
+ if !parser.simple_key_allowed {
+ return yaml_parser_set_scanner_error(parser, "", parser.mark,
+ "mapping keys are not allowed in this context")
+ }
+ // Add the BLOCK-MAPPING-START token if needed.
+ if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) {
+ return false
+ }
+ }
+
+ // Reset any potential simple keys on the current flow level.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ // Simple keys are allowed after '?' in the block context.
+ parser.simple_key_allowed = parser.flow_level == 0
+
+ // Consume the token.
+ start_mark := parser.mark
+ skip(parser)
+ end_mark := parser.mark
+
+ // Create the KEY token and append it to the queue.
+ token := yaml_token_t{
+ typ: yaml_KEY_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the VALUE token.
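+//
+// Illustrative note: for an implicit key such as "foo: bar" the scanner only
+// learns that "foo" was a key on reaching the ':'; the simple key saved
+// earlier is what lets the KEY token be inserted retroactively at the
+// position where "foo" began.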
+func yaml_parser_fetch_value(parser *yaml_parser_t) bool {
+
+ simple_key := &parser.simple_keys[len(parser.simple_keys)-1]
+
+ // Have we found a simple key?
+ if valid, ok := yaml_simple_key_is_valid(parser, simple_key); !ok {
+ return false
+
+ } else if valid {
+
+ // Create the KEY token and insert it into the queue.
+ token := yaml_token_t{
+ typ: yaml_KEY_TOKEN,
+ start_mark: simple_key.mark,
+ end_mark: simple_key.mark,
+ }
+ yaml_insert_token(parser, simple_key.token_number-parser.tokens_parsed, &token)
+
+ // In the block context, we may need to add the BLOCK-MAPPING-START token.
+ if !yaml_parser_roll_indent(parser, simple_key.mark.column,
+ simple_key.token_number,
+ yaml_BLOCK_MAPPING_START_TOKEN, simple_key.mark) {
+ return false
+ }
+
+ // Remove the simple key.
+ simple_key.possible = false
+ delete(parser.simple_keys_by_tok, simple_key.token_number)
+
+ // A simple key cannot follow another simple key.
+ parser.simple_key_allowed = false
+
+ } else {
+ // The ':' indicator follows a complex key.
+
+ // In the block context, extra checks are required.
+ if parser.flow_level == 0 {
+
+ // Check if we are allowed to start a complex value.
+ if !parser.simple_key_allowed {
+ return yaml_parser_set_scanner_error(parser, "", parser.mark,
+ "mapping values are not allowed in this context")
+ }
+
+ // Add the BLOCK-MAPPING-START token if needed.
+ if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) {
+ return false
+ }
+ }
+
+ // Simple keys after ':' are allowed in the block context.
+ parser.simple_key_allowed = parser.flow_level == 0
+ }
+
+ // Consume the token.
+ start_mark := parser.mark
+ skip(parser)
+ end_mark := parser.mark
+
+ // Create the VALUE token and append it to the queue.
+ token := yaml_token_t{
+ typ: yaml_VALUE_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the ALIAS or ANCHOR token.
+func yaml_parser_fetch_anchor(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+ // An anchor or an alias could be a simple key.
+ if !yaml_parser_save_simple_key(parser) {
+ return false
+ }
+
+ // A simple key cannot follow an anchor or an alias.
+ parser.simple_key_allowed = false
+
+ // Create the ALIAS or ANCHOR token and append it to the queue.
+ var token yaml_token_t
+ if !yaml_parser_scan_anchor(parser, &token, typ) {
+ return false
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the TAG token.
+func yaml_parser_fetch_tag(parser *yaml_parser_t) bool {
+ // A tag could be a simple key.
+ if !yaml_parser_save_simple_key(parser) {
+ return false
+ }
+
+ // A simple key cannot follow a tag.
+ parser.simple_key_allowed = false
+
+ // Create the TAG token and append it to the queue.
+ var token yaml_token_t
+ if !yaml_parser_scan_tag(parser, &token) {
+ return false
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the SCALAR(...,literal) or SCALAR(...,folded) tokens.
+func yaml_parser_fetch_block_scalar(parser *yaml_parser_t, literal bool) bool {
+ // Remove any potential simple keys.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ // A simple key may follow a block scalar.
+ parser.simple_key_allowed = true
+
+ // Create the SCALAR token and append it to the queue.
+ var token yaml_token_t
+ if !yaml_parser_scan_block_scalar(parser, &token, literal) {
+ return false
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the SCALAR(...,single-quoted) or SCALAR(...,double-quoted) tokens.
+func yaml_parser_fetch_flow_scalar(parser *yaml_parser_t, single bool) bool {
+ // A flow scalar could be a simple key.
+ if !yaml_parser_save_simple_key(parser) {
+ return false
+ }
+
+ // A simple key cannot follow a flow scalar.
+ parser.simple_key_allowed = false
+
+ // Create the SCALAR token and append it to the queue.
+ var token yaml_token_t
+ if !yaml_parser_scan_flow_scalar(parser, &token, single) {
+ return false
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the SCALAR(...,plain) token.
+func yaml_parser_fetch_plain_scalar(parser *yaml_parser_t) bool {
+ // A plain scalar could be a simple key.
+ if !yaml_parser_save_simple_key(parser) {
+ return false
+ }
+
+ // A simple key cannot follow a plain scalar.
+ parser.simple_key_allowed = false
+
+ // Create the SCALAR token and append it to the queue.
+ var token yaml_token_t
+ if !yaml_parser_scan_plain_scalar(parser, &token) {
+ return false
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Eat whitespaces and comments until the next token is found.
+func yaml_parser_scan_to_next_token(parser *yaml_parser_t) bool {
+
+ // Loop until the next token is found.
+ for {
+ // Allow the BOM mark to start a line.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if parser.mark.column == 0 && is_bom(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ }
+
+ // Eat whitespaces.
+ // Tabs are allowed:
+ // - in the flow context
+ // - in the block context, but not at the beginning of the line or
+ // after '-', '?', or ':' (complex value).
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ for parser.buffer[parser.buffer_pos] == ' ' || ((parser.flow_level > 0 || !parser.simple_key_allowed) && parser.buffer[parser.buffer_pos] == '\t') {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Eat a comment until a line break.
+ if parser.buffer[parser.buffer_pos] == '#' {
+ for !is_breakz(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+ }
+
+ // If it is a line break, eat it.
+ if is_break(parser.buffer, parser.buffer_pos) {
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+ skip_line(parser)
+
+ // In the block context, a new line may start a simple key.
+ if parser.flow_level == 0 {
+ parser.simple_key_allowed = true
+ }
+ } else {
+ break // We have found a token.
+ }
+ }
+
+ return true
+}
+
+// Scan a YAML-DIRECTIVE or TAG-DIRECTIVE token.
+//
+// Scope:
+// %YAML 1.1 # a comment \n
+// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+// %TAG !yaml! tag:yaml.org,2002: \n
+// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+//
+func yaml_parser_scan_directive(parser *yaml_parser_t, token *yaml_token_t) bool {
+ // Eat '%'.
+ start_mark := parser.mark
+ skip(parser)
+
+ // Scan the directive name.
+ var name []byte
+ if !yaml_parser_scan_directive_name(parser, start_mark, &name) {
+ return false
+ }
+
+ // Is it a YAML directive?
+ if bytes.Equal(name, []byte("YAML")) {
+ // Scan the VERSION directive value.
+ var major, minor int8
+ if !yaml_parser_scan_version_directive_value(parser, start_mark, &major, &minor) {
+ return false
+ }
+ end_mark := parser.mark
+
+ // Create a VERSION-DIRECTIVE token.
+ *token = yaml_token_t{
+ typ: yaml_VERSION_DIRECTIVE_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ major: major,
+ minor: minor,
+ }
+
+ // Is it a TAG directive?
+ } else if bytes.Equal(name, []byte("TAG")) {
+ // Scan the TAG directive value.
+ var handle, prefix []byte
+ if !yaml_parser_scan_tag_directive_value(parser, start_mark, &handle, &prefix) {
+ return false
+ }
+ end_mark := parser.mark
+
+ // Create a TAG-DIRECTIVE token.
+ *token = yaml_token_t{
+ typ: yaml_TAG_DIRECTIVE_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ value: handle,
+ prefix: prefix,
+ }
+
+ // Unknown directive.
+ } else {
+ yaml_parser_set_scanner_error(parser, "while scanning a directive",
+ start_mark, "found unknown directive name")
+ return false
+ }
+
+ // Eat the rest of the line including any comments.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ for is_blank(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ if parser.buffer[parser.buffer_pos] == '#' {
+ for !is_breakz(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+ }
+
+ // Check if we are at the end of the line.
+ if !is_breakz(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a directive",
+ start_mark, "did not find expected comment or line break")
+ return false
+ }
+
+ // Eat a line break.
+ if is_break(parser.buffer, parser.buffer_pos) {
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+ skip_line(parser)
+ }
+
+ return true
+}
+
+// Scan the directive name.
+//
+// Scope:
+// %YAML 1.1 # a comment \n
+// ^^^^
+// %TAG !yaml! tag:yaml.org,2002: \n
+// ^^^
+//
+func yaml_parser_scan_directive_name(parser *yaml_parser_t, start_mark yaml_mark_t, name *[]byte) bool {
+ // Consume the directive name.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ var s []byte
+ for is_alpha(parser.buffer, parser.buffer_pos) {
+ s = read(parser, s)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Check if the name is empty.
+ if len(s) == 0 {
+ yaml_parser_set_scanner_error(parser, "while scanning a directive",
+ start_mark, "could not find expected directive name")
+ return false
+ }
+
+ // Check for a blank character after the name.
+ if !is_blankz(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a directive",
+ start_mark, "found unexpected non-alphabetical character")
+ return false
+ }
+ *name = s
+ return true
+}
+
+// Scan the value of VERSION-DIRECTIVE.
+//
+// Scope:
+// %YAML 1.1 # a comment \n
+// ^^^^^^
+func yaml_parser_scan_version_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, major, minor *int8) bool {
+ // Eat whitespaces.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ for is_blank(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Consume the major version number.
+ if !yaml_parser_scan_version_directive_number(parser, start_mark, major) {
+ return false
+ }
+
+ // Eat '.'.
+ if parser.buffer[parser.buffer_pos] != '.' {
+ return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive",
+ start_mark, "did not find expected digit or '.' character")
+ }
+
+ skip(parser)
+
+ // Consume the minor version number.
+ if !yaml_parser_scan_version_directive_number(parser, start_mark, minor) {
+ return false
+ }
+ return true
+}
+
+const max_number_length = 2
+
+// Scan the version number of VERSION-DIRECTIVE.
+//
+// Scope:
+// %YAML 1.1 # a comment \n
+// ^
+// %YAML 1.1 # a comment \n
+// ^
+func yaml_parser_scan_version_directive_number(parser *yaml_parser_t, start_mark yaml_mark_t, number *int8) bool {
+
+ // Repeat while the next character is a digit.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ var value, length int8
+ for is_digit(parser.buffer, parser.buffer_pos) {
+ // Check if the number is too long.
+ length++
+ if length > max_number_length {
+ return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive",
+ start_mark, "found extremely long version number")
+ }
+ value = value*10 + int8(as_digit(parser.buffer, parser.buffer_pos))
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Check if the number was present.
+ if length == 0 {
+ return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive",
+ start_mark, "did not find expected version number")
+ }
+ *number = value
+ return true
+}
+
+// Scan the value of a TAG-DIRECTIVE token.
+//
+// Scope:
+// %TAG !yaml! tag:yaml.org,2002: \n
+// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+//
+func yaml_parser_scan_tag_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, handle, prefix *[]byte) bool {
+ var handle_value, prefix_value []byte
+
+ // Eat whitespaces.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ for is_blank(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Scan a handle.
+ if !yaml_parser_scan_tag_handle(parser, true, start_mark, &handle_value) {
+ return false
+ }
+
+ // Expect a whitespace.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if !is_blank(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive",
+ start_mark, "did not find expected whitespace")
+ return false
+ }
+
+ // Eat whitespaces.
+ for is_blank(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Scan a prefix.
+ if !yaml_parser_scan_tag_uri(parser, true, nil, start_mark, &prefix_value) {
+ return false
+ }
+
+ // Expect a whitespace or line break.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if !is_blankz(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive",
+ start_mark, "did not find expected whitespace or line break")
+ return false
+ }
+
+ *handle = handle_value
+ *prefix = prefix_value
+ return true
+}
+
+func yaml_parser_scan_anchor(parser *yaml_parser_t, token *yaml_token_t, typ yaml_token_type_t) bool {
+ var s []byte
+
+ // Eat the indicator character.
+ start_mark := parser.mark
+ skip(parser)
+
+ // Consume the value.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ for is_alpha(parser.buffer, parser.buffer_pos) {
+ s = read(parser, s)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ end_mark := parser.mark
+
+ /*
+ * Check if the length of the anchor is greater than 0 and it is followed by
+ * a whitespace character or one of the indicators:
+ *
+ * '?', ':', ',', ']', '}', '%', '@', '`'.
+ */
+
+ if len(s) == 0 ||
+ !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '?' ||
+ parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == ',' ||
+ parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '}' ||
+ parser.buffer[parser.buffer_pos] == '%' || parser.buffer[parser.buffer_pos] == '@' ||
+ parser.buffer[parser.buffer_pos] == '`') {
+ context := "while scanning an alias"
+ if typ == yaml_ANCHOR_TOKEN {
+ context = "while scanning an anchor"
+ }
+ yaml_parser_set_scanner_error(parser, context, start_mark,
+ "did not find expected alphabetic or numeric character")
+ return false
+ }
+
+ // Create a token.
+ *token = yaml_token_t{
+ typ: typ,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ value: s,
+ }
+
+ return true
+}
+
+/*
+ * Scan a TAG token.
+ */
+
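+// Illustrative summary of the three tag forms handled below:
+//
+//     !<tag:yaml.org,2002:str>   verbatim tag: the handle is kept empty
+//     !!str                      handle "!!" with suffix "str"
+//     !local                     handle "!" with suffix "local"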
+func yaml_parser_scan_tag(parser *yaml_parser_t, token *yaml_token_t) bool {
+ var handle, suffix []byte
+
+ start_mark := parser.mark
+
+ // Check if the tag is in the canonical form.
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+
+ if parser.buffer[parser.buffer_pos+1] == '<' {
+ // Keep the handle as ''
+
+ // Eat '!<'
+ skip(parser)
+ skip(parser)
+
+ // Consume the tag value.
+ if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) {
+ return false
+ }
+
+ // Check for '>' and eat it.
+ if parser.buffer[parser.buffer_pos] != '>' {
+ yaml_parser_set_scanner_error(parser, "while scanning a tag",
+ start_mark, "did not find the expected '>'")
+ return false
+ }
+
+ skip(parser)
+ } else {
+ // The tag has either the '!suffix' or the '!handle!suffix' form.
+
+ // First, try to scan a handle.
+ if !yaml_parser_scan_tag_handle(parser, false, start_mark, &handle) {
+ return false
+ }
+
+ // Check if it is, indeed, a handle.
+ if handle[0] == '!' && len(handle) > 1 && handle[len(handle)-1] == '!' {
+ // Scan the suffix now.
+ if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) {
+ return false
+ }
+ } else {
+ // It wasn't a handle after all. Scan the rest of the tag.
+ if !yaml_parser_scan_tag_uri(parser, false, handle, start_mark, &suffix) {
+ return false
+ }
+
+ // Set the handle to '!'.
+ handle = []byte{'!'}
+
+ // A special case: the '!' tag. Set the handle to '' and the
+ // suffix to '!'.
+ if len(suffix) == 0 {
+ handle, suffix = suffix, handle
+ }
+ }
+ }
+
+ // Check the character which ends the tag.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if !is_blankz(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a tag",
+ start_mark, "did not find expected whitespace or line break")
+ return false
+ }
+
+ end_mark := parser.mark
+
+ // Create a token.
+ *token = yaml_token_t{
+ typ: yaml_TAG_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ value: handle,
+ suffix: suffix,
+ }
+ return true
+}
+
+// Scan a tag handle.
+func yaml_parser_scan_tag_handle(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, handle *[]byte) bool {
+ // Check the initial '!' character.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if parser.buffer[parser.buffer_pos] != '!' {
+ yaml_parser_set_scanner_tag_error(parser, directive,
+ start_mark, "did not find expected '!'")
+ return false
+ }
+
+ var s []byte
+
+ // Copy the '!' character.
+ s = read(parser, s)
+
+ // Copy all subsequent alphabetical and numerical characters.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ for is_alpha(parser.buffer, parser.buffer_pos) {
+ s = read(parser, s)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Check if the trailing character is '!' and copy it.
+ if parser.buffer[parser.buffer_pos] == '!' {
+ s = read(parser, s)
+ } else {
+ // It's either the '!' tag or not really a tag handle. If it's a %TAG
+ // directive, it's an error. If it's a tag token, it must be part of a URI.
+ if directive && string(s) != "!" {
+ yaml_parser_set_scanner_tag_error(parser, directive,
+ start_mark, "did not find expected '!'")
+ return false
+ }
+ }
+
+ *handle = s
+ return true
+}
+
+// Scan a tag.
+func yaml_parser_scan_tag_uri(parser *yaml_parser_t, directive bool, head []byte, start_mark yaml_mark_t, uri *[]byte) bool {
+ //size_t length = head ? strlen((char *)head) : 0
+ var s []byte
+ hasTag := len(head) > 0
+
+ // Copy the head if needed.
+ //
+ // Note that we don't copy the leading '!' character.
+ if len(head) > 1 {
+ s = append(s, head[1:]...)
+ }
+
+ // Scan the tag.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ // The set of characters that may appear in URI is as follows:
+ //
+ // '0'-'9', 'A'-'Z', 'a'-'z', '_', '-', ';', '/', '?', ':', '@', '&',
+ // '=', '+', '$', ',', '.', '!', '~', '*', '\'', '(', ')', '[', ']',
+ // '%'.
+ // [Go] Convert this into more reasonable logic.
+ for is_alpha(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == ';' ||
+ parser.buffer[parser.buffer_pos] == '/' || parser.buffer[parser.buffer_pos] == '?' ||
+ parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == '@' ||
+ parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '=' ||
+ parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '$' ||
+ parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '.' ||
+ parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '~' ||
+ parser.buffer[parser.buffer_pos] == '*' || parser.buffer[parser.buffer_pos] == '\'' ||
+ parser.buffer[parser.buffer_pos] == '(' || parser.buffer[parser.buffer_pos] == ')' ||
+ parser.buffer[parser.buffer_pos] == '[' || parser.buffer[parser.buffer_pos] == ']' ||
+ parser.buffer[parser.buffer_pos] == '%' {
+ // Check if it is a URI-escape sequence.
+ if parser.buffer[parser.buffer_pos] == '%' {
+ if !yaml_parser_scan_uri_escapes(parser, directive, start_mark, &s) {
+ return false
+ }
+ } else {
+ s = read(parser, s)
+ }
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ hasTag = true
+ }
+
+ if !hasTag {
+ yaml_parser_set_scanner_tag_error(parser, directive,
+ start_mark, "did not find expected tag URI")
+ return false
+ }
+ *uri = s
+ return true
+}
+
+// Decode a URI-escape sequence corresponding to a single UTF-8 character.
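+//
+// For example, the input "%C3%A9" yields the octets 0xC3 0xA9, the UTF-8
+// encoding of 'é'; the width of the leading octet tells the loop below how
+// many escaped octets to expect.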
+func yaml_parser_scan_uri_escapes(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, s *[]byte) bool {
+
+ // Decode the required number of characters.
+ w := 1024
+ for w > 0 {
+ // Check for a URI-escaped octet.
+ if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) {
+ return false
+ }
+
+ if !(parser.buffer[parser.buffer_pos] == '%' &&
+ is_hex(parser.buffer, parser.buffer_pos+1) &&
+ is_hex(parser.buffer, parser.buffer_pos+2)) {
+ return yaml_parser_set_scanner_tag_error(parser, directive,
+ start_mark, "did not find URI escaped octet")
+ }
+
+ // Get the octet.
+ octet := byte((as_hex(parser.buffer, parser.buffer_pos+1) << 4) + as_hex(parser.buffer, parser.buffer_pos+2))
+
+ // If it is the leading octet, determine the length of the UTF-8 sequence.
+ if w == 1024 {
+ w = width(octet)
+ if w == 0 {
+ return yaml_parser_set_scanner_tag_error(parser, directive,
+ start_mark, "found an incorrect leading UTF-8 octet")
+ }
+ } else {
+ // Check if the trailing octet is correct.
+ if octet&0xC0 != 0x80 {
+ return yaml_parser_set_scanner_tag_error(parser, directive,
+ start_mark, "found an incorrect trailing UTF-8 octet")
+ }
+ }
+
+ // Copy the octet and move the pointers.
+ *s = append(*s, octet)
+ skip(parser)
+ skip(parser)
+ skip(parser)
+ w--
+ }
+ return true
+}
+
+// Scan a block scalar.
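+//
+// Illustrative reference for the header parsed below: '|' (literal) or '>'
+// (folded) may be followed by a chomping indicator and/or an indentation
+// indicator, in either order:
+//
+//     |     clip: keep a single trailing line break
+//     |-    strip: drop all trailing line breaks
+//     |+2   keep: keep all trailing line breaks; content is indented two
+//           columns past the parent node's indentation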
+func yaml_parser_scan_block_scalar(parser *yaml_parser_t, token *yaml_token_t, literal bool) bool {
+ // Eat the indicator '|' or '>'.
+ start_mark := parser.mark
+ skip(parser)
+
+ // Scan the additional block scalar indicators.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ // Check for a chomping indicator.
+ var chomping, increment int
+ if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' {
+ // Set the chomping method and eat the indicator.
+ if parser.buffer[parser.buffer_pos] == '+' {
+ chomping = +1
+ } else {
+ chomping = -1
+ }
+ skip(parser)
+
+ // Check for an indentation indicator.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if is_digit(parser.buffer, parser.buffer_pos) {
+ // Check that the indentation is greater than 0.
+ if parser.buffer[parser.buffer_pos] == '0' {
+ yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
+ start_mark, "found an indentation indicator equal to 0")
+ return false
+ }
+
+ // Get the indentation level and eat the indicator.
+ increment = as_digit(parser.buffer, parser.buffer_pos)
+ skip(parser)
+ }
+
+ } else if is_digit(parser.buffer, parser.buffer_pos) {
+ // Do the same as above, but in the opposite order.
+
+ if parser.buffer[parser.buffer_pos] == '0' {
+ yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
+ start_mark, "found an indentation indicator equal to 0")
+ return false
+ }
+ increment = as_digit(parser.buffer, parser.buffer_pos)
+ skip(parser)
+
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' {
+ if parser.buffer[parser.buffer_pos] == '+' {
+ chomping = +1
+ } else {
+ chomping = -1
+ }
+ skip(parser)
+ }
+ }
+
+ // Eat whitespaces and comments to the end of the line.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ for is_blank(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+ if parser.buffer[parser.buffer_pos] == '#' {
+ for !is_breakz(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+ }
+
+ // Check if we are at the end of the line.
+ if !is_breakz(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
+ start_mark, "did not find expected comment or line break")
+ return false
+ }
+
+ // Eat a line break.
+ if is_break(parser.buffer, parser.buffer_pos) {
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+ skip_line(parser)
+ }
+
+ end_mark := parser.mark
+
+ // Set the indentation level if it was specified.
+ var indent int
+ if increment > 0 {
+ if parser.indent >= 0 {
+ indent = parser.indent + increment
+ } else {
+ indent = increment
+ }
+ }
+
+ // Scan the leading line breaks and determine the indentation level if needed.
+ var s, leading_break, trailing_breaks []byte
+ if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) {
+ return false
+ }
+
+ // Scan the block scalar content.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ var leading_blank, trailing_blank bool
+ for parser.mark.column == indent && !is_z(parser.buffer, parser.buffer_pos) {
+ // We are at the beginning of a non-empty line.
+
+ // Is it a trailing whitespace?
+ trailing_blank = is_blank(parser.buffer, parser.buffer_pos)
+
+ // Check if we need to fold the leading line break.
+ if !literal && !leading_blank && !trailing_blank && len(leading_break) > 0 && leading_break[0] == '\n' {
+ // Do we need to join the lines by space?
+ if len(trailing_breaks) == 0 {
+ s = append(s, ' ')
+ }
+ } else {
+ s = append(s, leading_break...)
+ }
+ leading_break = leading_break[:0]
+
+ // Append the remaining line breaks.
+ s = append(s, trailing_breaks...)
+ trailing_breaks = trailing_breaks[:0]
+
+ // Is it a leading whitespace?
+ leading_blank = is_blank(parser.buffer, parser.buffer_pos)
+
+ // Consume the current line.
+ for !is_breakz(parser.buffer, parser.buffer_pos) {
+ s = read(parser, s)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Consume the line break.
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+
+ leading_break = read_line(parser, leading_break)
+
+ // Eat the following indentation spaces and line breaks.
+ if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) {
+ return false
+ }
+ }
+
+ // Chomp the tail.
+ if chomping != -1 {
+ s = append(s, leading_break...)
+ }
+ if chomping == 1 {
+ s = append(s, trailing_breaks...)
+ }
+
+ // Create a token.
+ *token = yaml_token_t{
+ typ: yaml_SCALAR_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ value: s,
+ style: yaml_LITERAL_SCALAR_STYLE,
+ }
+ if !literal {
+ token.style = yaml_FOLDED_SCALAR_STYLE
+ }
+ return true
+}
+
+// Scan indentation spaces and line breaks for a block scalar. Determine the
+// indentation level if needed.
+func yaml_parser_scan_block_scalar_breaks(parser *yaml_parser_t, indent *int, breaks *[]byte, start_mark yaml_mark_t, end_mark *yaml_mark_t) bool {
+ *end_mark = parser.mark
+
+ // Eat the indentation spaces and line breaks.
+ max_indent := 0
+ for {
+ // Eat the indentation spaces.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ for (*indent == 0 || parser.mark.column < *indent) && is_space(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+ if parser.mark.column > max_indent {
+ max_indent = parser.mark.column
+ }
+
+ // Check for a tab character messing up the indentation.
+ if (*indent == 0 || parser.mark.column < *indent) && is_tab(parser.buffer, parser.buffer_pos) {
+ return yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
+ start_mark, "found a tab character where an indentation space is expected")
+ }
+
+ // Have we found a non-empty line?
+ if !is_break(parser.buffer, parser.buffer_pos) {
+ break
+ }
+
+ // Consume the line break.
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+ // [Go] Should really be returning breaks instead.
+ *breaks = read_line(parser, *breaks)
+ *end_mark = parser.mark
+ }
+
+ // Determine the indentation level if needed.
+ if *indent == 0 {
+ *indent = max_indent
+ if *indent < parser.indent+1 {
+ *indent = parser.indent + 1
+ }
+ if *indent < 1 {
+ *indent = 1
+ }
+ }
+ return true
+}
+
+// Scan a quoted scalar.
+func yaml_parser_scan_flow_scalar(parser *yaml_parser_t, token *yaml_token_t, single bool) bool {
+ // Eat the left quote.
+ start_mark := parser.mark
+ skip(parser)
+
+ // Consume the content of the quoted scalar.
+ var s, leading_break, trailing_breaks, whitespaces []byte
+ for {
+ // Check that there are no document indicators at the beginning of the line.
+ if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) {
+ return false
+ }
+
+ if parser.mark.column == 0 &&
+ ((parser.buffer[parser.buffer_pos+0] == '-' &&
+ parser.buffer[parser.buffer_pos+1] == '-' &&
+ parser.buffer[parser.buffer_pos+2] == '-') ||
+ (parser.buffer[parser.buffer_pos+0] == '.' &&
+ parser.buffer[parser.buffer_pos+1] == '.' &&
+ parser.buffer[parser.buffer_pos+2] == '.')) &&
+ is_blankz(parser.buffer, parser.buffer_pos+3) {
+ yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar",
+ start_mark, "found unexpected document indicator")
+ return false
+ }
+
+ // Check for EOF.
+ if is_z(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar",
+ start_mark, "found unexpected end of stream")
+ return false
+ }
+
+ // Consume non-blank characters.
+ leading_blanks := false
+ for !is_blankz(parser.buffer, parser.buffer_pos) {
+ if single && parser.buffer[parser.buffer_pos] == '\'' && parser.buffer[parser.buffer_pos+1] == '\'' {
+ // It is an escaped single quote.
+ s = append(s, '\'')
+ skip(parser)
+ skip(parser)
+
+ } else if single && parser.buffer[parser.buffer_pos] == '\'' {
+ // It is a right single quote.
+ break
+ } else if !single && parser.buffer[parser.buffer_pos] == '"' {
+ // It is a right double quote.
+ break
+
+ } else if !single && parser.buffer[parser.buffer_pos] == '\\' && is_break(parser.buffer, parser.buffer_pos+1) {
+ // It is an escaped line break.
+ if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) {
+ return false
+ }
+ skip(parser)
+ skip_line(parser)
+ leading_blanks = true
+ break
+
+ } else if !single && parser.buffer[parser.buffer_pos] == '\\' {
+ // It is an escape sequence.
+ code_length := 0
+
+ // Check the escape character.
+ switch parser.buffer[parser.buffer_pos+1] {
+ case '0':
+ s = append(s, 0)
+ case 'a':
+ s = append(s, '\x07')
+ case 'b':
+ s = append(s, '\x08')
+ case 't', '\t':
+ s = append(s, '\x09')
+ case 'n':
+ s = append(s, '\x0A')
+ case 'v':
+ s = append(s, '\x0B')
+ case 'f':
+ s = append(s, '\x0C')
+ case 'r':
+ s = append(s, '\x0D')
+ case 'e':
+ s = append(s, '\x1B')
+ case ' ':
+ s = append(s, '\x20')
+ case '"':
+ s = append(s, '"')
+ case '\'':
+ s = append(s, '\'')
+ case '\\':
+ s = append(s, '\\')
+ case 'N': // NEL (#x85)
+ s = append(s, '\xC2')
+ s = append(s, '\x85')
+ case '_': // #xA0
+ s = append(s, '\xC2')
+ s = append(s, '\xA0')
+ case 'L': // LS (#x2028)
+ s = append(s, '\xE2')
+ s = append(s, '\x80')
+ s = append(s, '\xA8')
+ case 'P': // PS (#x2029)
+ s = append(s, '\xE2')
+ s = append(s, '\x80')
+ s = append(s, '\xA9')
+ case 'x':
+ code_length = 2
+ case 'u':
+ code_length = 4
+ case 'U':
+ code_length = 8
+ default:
+ yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
+ start_mark, "found unknown escape character")
+ return false
+ }
+
+ skip(parser)
+ skip(parser)
+
+ // Consume an arbitrary escape code.
+ if code_length > 0 {
+ var value int
+
+ // Scan the character value.
+ if parser.unread < code_length && !yaml_parser_update_buffer(parser, code_length) {
+ return false
+ }
+ for k := 0; k < code_length; k++ {
+ if !is_hex(parser.buffer, parser.buffer_pos+k) {
+ yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
+ start_mark, "did not find expected hexdecimal number")
+ return false
+ }
+ value = (value << 4) + as_hex(parser.buffer, parser.buffer_pos+k)
+ }
+
+ // Check the value and write the character.
+ if (value >= 0xD800 && value <= 0xDFFF) || value > 0x10FFFF {
+ yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
+ start_mark, "found invalid Unicode character escape code")
+ return false
+ }
+ if value <= 0x7F {
+ s = append(s, byte(value))
+ } else if value <= 0x7FF {
+ s = append(s, byte(0xC0+(value>>6)))
+ s = append(s, byte(0x80+(value&0x3F)))
+ } else if value <= 0xFFFF {
+ s = append(s, byte(0xE0+(value>>12)))
+ s = append(s, byte(0x80+((value>>6)&0x3F)))
+ s = append(s, byte(0x80+(value&0x3F)))
+ } else {
+ s = append(s, byte(0xF0+(value>>18)))
+ s = append(s, byte(0x80+((value>>12)&0x3F)))
+ s = append(s, byte(0x80+((value>>6)&0x3F)))
+ s = append(s, byte(0x80+(value&0x3F)))
+ }
+
+ // Advance the pointer.
+ for k := 0; k < code_length; k++ {
+ skip(parser)
+ }
+ }
+ } else {
+ // It is a non-escaped non-blank character.
+ s = read(parser, s)
+ }
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+ }
+
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ // Check if we are at the end of the scalar.
+ if single {
+ if parser.buffer[parser.buffer_pos] == '\'' {
+ break
+ }
+ } else {
+ if parser.buffer[parser.buffer_pos] == '"' {
+ break
+ }
+ }
+
+ // Consume blank characters.
+ for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) {
+ if is_blank(parser.buffer, parser.buffer_pos) {
+ // Consume a space or a tab character.
+ if !leading_blanks {
+ whitespaces = read(parser, whitespaces)
+ } else {
+ skip(parser)
+ }
+ } else {
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+
+ // Check if it is a first line break.
+ if !leading_blanks {
+ whitespaces = whitespaces[:0]
+ leading_break = read_line(parser, leading_break)
+ leading_blanks = true
+ } else {
+ trailing_breaks = read_line(parser, trailing_breaks)
+ }
+ }
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Join the whitespaces or fold line breaks.
+ if leading_blanks {
+ // Do we need to fold line breaks?
+ if len(leading_break) > 0 && leading_break[0] == '\n' {
+ if len(trailing_breaks) == 0 {
+ s = append(s, ' ')
+ } else {
+ s = append(s, trailing_breaks...)
+ }
+ } else {
+ s = append(s, leading_break...)
+ s = append(s, trailing_breaks...)
+ }
+ trailing_breaks = trailing_breaks[:0]
+ leading_break = leading_break[:0]
+ } else {
+ s = append(s, whitespaces...)
+ whitespaces = whitespaces[:0]
+ }
+ }
+
+ // Eat the right quote.
+ skip(parser)
+ end_mark := parser.mark
+
+ // Create a token.
+ *token = yaml_token_t{
+ typ: yaml_SCALAR_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ value: s,
+ style: yaml_SINGLE_QUOTED_SCALAR_STYLE,
+ }
+ if !single {
+ token.style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ }
+ return true
+}
+
+// Scan a plain scalar.
+func yaml_parser_scan_plain_scalar(parser *yaml_parser_t, token *yaml_token_t) bool {
+
+ var s, leading_break, trailing_breaks, whitespaces []byte
+ var leading_blanks bool
+ var indent = parser.indent + 1
+
+ start_mark := parser.mark
+ end_mark := parser.mark
+
+ // Consume the content of the plain scalar.
+ for {
+ // Check for a document indicator.
+ if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) {
+ return false
+ }
+ if parser.mark.column == 0 &&
+ ((parser.buffer[parser.buffer_pos+0] == '-' &&
+ parser.buffer[parser.buffer_pos+1] == '-' &&
+ parser.buffer[parser.buffer_pos+2] == '-') ||
+ (parser.buffer[parser.buffer_pos+0] == '.' &&
+ parser.buffer[parser.buffer_pos+1] == '.' &&
+ parser.buffer[parser.buffer_pos+2] == '.')) &&
+ is_blankz(parser.buffer, parser.buffer_pos+3) {
+ break
+ }
+
+ // Check for a comment.
+ if parser.buffer[parser.buffer_pos] == '#' {
+ break
+ }
+
+ // Consume non-blank characters.
+ for !is_blankz(parser.buffer, parser.buffer_pos) {
+
+ // Check for indicators that may end a plain scalar.
+ if (parser.buffer[parser.buffer_pos] == ':' && is_blankz(parser.buffer, parser.buffer_pos+1)) ||
+ (parser.flow_level > 0 &&
+ (parser.buffer[parser.buffer_pos] == ',' ||
+ parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == '[' ||
+ parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' ||
+ parser.buffer[parser.buffer_pos] == '}')) {
+ break
+ }
+
+ // Check if we need to join whitespaces and breaks.
+ if leading_blanks || len(whitespaces) > 0 {
+ if leading_blanks {
+ // Do we need to fold line breaks?
+ if leading_break[0] == '\n' {
+ if len(trailing_breaks) == 0 {
+ s = append(s, ' ')
+ } else {
+ s = append(s, trailing_breaks...)
+ }
+ } else {
+ s = append(s, leading_break...)
+ s = append(s, trailing_breaks...)
+ }
+ trailing_breaks = trailing_breaks[:0]
+ leading_break = leading_break[:0]
+ leading_blanks = false
+ } else {
+ s = append(s, whitespaces...)
+ whitespaces = whitespaces[:0]
+ }
+ }
+
+ // Copy the character.
+ s = read(parser, s)
+
+ end_mark = parser.mark
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+ }
+
+ // Is it the end?
+ if !(is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos)) {
+ break
+ }
+
+ // Consume blank characters.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) {
+ if is_blank(parser.buffer, parser.buffer_pos) {
+
+ // Check for tab characters that abuse indentation.
+ if leading_blanks && parser.mark.column < indent && is_tab(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a plain scalar",
+ start_mark, "found a tab character that violates indentation")
+ return false
+ }
+
+ // Consume a space or a tab character.
+ if !leading_blanks {
+ whitespaces = read(parser, whitespaces)
+ } else {
+ skip(parser)
+ }
+ } else {
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+
+ // Check if it is a first line break.
+ if !leading_blanks {
+ whitespaces = whitespaces[:0]
+ leading_break = read_line(parser, leading_break)
+ leading_blanks = true
+ } else {
+ trailing_breaks = read_line(parser, trailing_breaks)
+ }
+ }
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Check indentation level.
+ if parser.flow_level == 0 && parser.mark.column < indent {
+ break
+ }
+ }
+
+ // Create a token.
+ *token = yaml_token_t{
+ typ: yaml_SCALAR_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ value: s,
+ style: yaml_PLAIN_SCALAR_STYLE,
+ }
+
+ // Note that we change the 'simple_key_allowed' flag.
+ if leading_blanks {
+ parser.simple_key_allowed = true
+ }
+ return true
+}
diff --git a/vendor/gopkg.in/yaml.v2/sorter.go b/vendor/gopkg.in/yaml.v2/sorter.go
new file mode 100644
index 0000000000..4c45e660a8
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/sorter.go
@@ -0,0 +1,113 @@
+package yaml
+
+import (
+ "reflect"
+ "unicode"
+)
+
+type keyList []reflect.Value
+
+func (l keyList) Len() int { return len(l) }
+func (l keyList) Swap(i, j int) { l[i], l[j] = l[j], l[i] }
+func (l keyList) Less(i, j int) bool {
+ a := l[i]
+ b := l[j]
+ ak := a.Kind()
+ bk := b.Kind()
+ for (ak == reflect.Interface || ak == reflect.Ptr) && !a.IsNil() {
+ a = a.Elem()
+ ak = a.Kind()
+ }
+ for (bk == reflect.Interface || bk == reflect.Ptr) && !b.IsNil() {
+ b = b.Elem()
+ bk = b.Kind()
+ }
+ af, aok := keyFloat(a)
+ bf, bok := keyFloat(b)
+ if aok && bok {
+ if af != bf {
+ return af < bf
+ }
+ if ak != bk {
+ return ak < bk
+ }
+ return numLess(a, b)
+ }
+ if ak != reflect.String || bk != reflect.String {
+ return ak < bk
+ }
+ ar, br := []rune(a.String()), []rune(b.String())
+ for i := 0; i < len(ar) && i < len(br); i++ {
+ if ar[i] == br[i] {
+ continue
+ }
+ al := unicode.IsLetter(ar[i])
+ bl := unicode.IsLetter(br[i])
+ if al && bl {
+ return ar[i] < br[i]
+ }
+ if al || bl {
+ return bl
+ }
+ var ai, bi int
+ var an, bn int64
+ if ar[i] == '0' || br[i] == '0' {
+ for j := i-1; j >= 0 && unicode.IsDigit(ar[j]); j-- {
+ if ar[j] != '0' {
+ an = 1
+ bn = 1
+ break
+ }
+ }
+ }
+ for ai = i; ai < len(ar) && unicode.IsDigit(ar[ai]); ai++ {
+ an = an*10 + int64(ar[ai]-'0')
+ }
+ for bi = i; bi < len(br) && unicode.IsDigit(br[bi]); bi++ {
+ bn = bn*10 + int64(br[bi]-'0')
+ }
+ if an != bn {
+ return an < bn
+ }
+ if ai != bi {
+ return ai < bi
+ }
+ return ar[i] < br[i]
+ }
+ return len(ar) < len(br)
+}
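+
+// An illustrative note (not upstream documentation): the comparison above
+// yields a natural sort, so runs of digits compare by numeric value:
+//
+//     "a2" < "a10"    (2 < 10, although "1" sorts before "2" lexically)
+//     "a"  < "a1"     (a shorter common prefix sorts first)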
+
+// keyFloat returns a float value for v if it is a number or bool,
+// and reports whether the conversion applied.
+func keyFloat(v reflect.Value) (f float64, ok bool) {
+ switch v.Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return float64(v.Int()), true
+ case reflect.Float32, reflect.Float64:
+ return v.Float(), true
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return float64(v.Uint()), true
+ case reflect.Bool:
+ if v.Bool() {
+ return 1, true
+ }
+ return 0, true
+ }
+ return 0, false
+}
+
+// numLess returns whether a < b.
+// a and b must have the same kind.
+func numLess(a, b reflect.Value) bool {
+ switch a.Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return a.Int() < b.Int()
+ case reflect.Float32, reflect.Float64:
+ return a.Float() < b.Float()
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return a.Uint() < b.Uint()
+ case reflect.Bool:
+ return !a.Bool() && b.Bool()
+ }
+ panic("not a number")
+}
diff --git a/vendor/gopkg.in/yaml.v2/writerc.go b/vendor/gopkg.in/yaml.v2/writerc.go
new file mode 100644
index 0000000000..a2dde608cb
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/writerc.go
@@ -0,0 +1,26 @@
+package yaml
+
+// Set the writer error and return false.
+func yaml_emitter_set_writer_error(emitter *yaml_emitter_t, problem string) bool {
+ emitter.error = yaml_WRITER_ERROR
+ emitter.problem = problem
+ return false
+}
+
+// Flush the output buffer.
+func yaml_emitter_flush(emitter *yaml_emitter_t) bool {
+ if emitter.write_handler == nil {
+ panic("write handler not set")
+ }
+
+ // Check if the buffer is empty.
+ if emitter.buffer_pos == 0 {
+ return true
+ }
+
+ if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil {
+ return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error())
+ }
+ emitter.buffer_pos = 0
+ return true
+}
diff --git a/vendor/gopkg.in/yaml.v2/yaml.go b/vendor/gopkg.in/yaml.v2/yaml.go
new file mode 100644
index 0000000000..30813884c0
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/yaml.go
@@ -0,0 +1,478 @@
+// Package yaml implements YAML support for the Go language.
+//
+// Source code and other details for the project are available at GitHub:
+//
+// https://github.com/go-yaml/yaml
+//
+package yaml
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "reflect"
+ "strings"
+ "sync"
+)
+
+// MapSlice encodes and decodes as a YAML map.
+// The order of keys is preserved when encoding and decoding.
+type MapSlice []MapItem
+
+// MapItem is an item in a MapSlice.
+type MapItem struct {
+ Key, Value interface{}
+}
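+
+// An illustrative sketch (not upstream documentation): a MapSlice keeps the
+// key order that a plain Go map would lose.
+//
+//     s := MapSlice{{Key: "b", Value: 2}, {Key: "a", Value: 1}}
+//     out, _ := Marshal(s) // "b: 2\na: 1\n", keys stay in declaration order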
+
+// The Unmarshaler interface may be implemented by types to customize their
+// behavior when being unmarshaled from a YAML document. The UnmarshalYAML
+// method receives a function that may be called to unmarshal the original
+// YAML value into a field or variable. It is safe to call the unmarshal
+// function parameter more than once if necessary.
+type Unmarshaler interface {
+ UnmarshalYAML(unmarshal func(interface{}) error) error
+}
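+
+// An illustrative sketch (the Timeout type is assumed for the example only):
+// a wrapper that parses a duration string while unmarshaling.
+//
+//     type Timeout struct{ D time.Duration }
+//
+//     func (t *Timeout) UnmarshalYAML(unmarshal func(interface{}) error) error {
+//         var s string
+//         if err := unmarshal(&s); err != nil {
+//             return err
+//         }
+//         d, err := time.ParseDuration(s)
+//         if err != nil {
+//             return err
+//         }
+//         t.D = d
+//         return nil
+//     }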
+
+// The Marshaler interface may be implemented by types to customize their
+// behavior when being marshaled into a YAML document. The returned value
+// is marshaled in place of the original value implementing Marshaler.
+//
+// If an error is returned by MarshalYAML, the marshaling procedure stops
+// and returns with the provided error.
+type Marshaler interface {
+ MarshalYAML() (interface{}, error)
+}
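+
+// An illustrative sketch, continuing the assumed Timeout example above:
+// the inverse direction, emitting the duration back as a string.
+//
+//     func (t Timeout) MarshalYAML() (interface{}, error) {
+//         return t.D.String(), nil
+//     }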
+
+// Unmarshal decodes the first document found within the in byte slice
+// and assigns decoded values into the out value.
+//
+// Maps and pointers (to a struct, string, int, etc) are accepted as out
+// values. If an internal pointer within a struct is not initialized,
+// the yaml package will initialize it if necessary for unmarshalling
+// the provided data. The out parameter must not be nil.
+//
+// The type of the decoded values should be compatible with the respective
+// values in out. If one or more values cannot be decoded due to type
+// mismatches, decoding continues partially until the end of the YAML
+// content, and a *yaml.TypeError is returned with details for all
+// missed values.
+//
+// Struct fields are only unmarshalled if they are exported (have an
+// upper case first letter), and are unmarshalled using the field name
+// lowercased as the default key. Custom keys may be defined via the
+// "yaml" name in the field tag: the content preceding the first comma
+// is used as the key, and the following comma-separated options are
+// used to tweak the marshalling process (see Marshal).
+// Conflicting names result in a runtime error.
+//
+// For example:
+//
+// type T struct {
+// F int `yaml:"a,omitempty"`
+// B int
+// }
+// var t T
+// yaml.Unmarshal([]byte("a: 1\nb: 2"), &t)
+//
+// See the documentation of Marshal for the format of tags and a list of
+// supported tag options.
+//
+func Unmarshal(in []byte, out interface{}) (err error) {
+ return unmarshal(in, out, false)
+}
+
+// UnmarshalStrict is like Unmarshal except that any fields that are found
+// in the data that do not have corresponding struct members, or mapping
+// keys that are duplicates, will result in
+// an error.
+func UnmarshalStrict(in []byte, out interface{}) (err error) {
+ return unmarshal(in, out, true)
+}
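+
+// An illustrative sketch, reusing the T type from the Unmarshal example:
+// an unknown key fails the strict variant where Unmarshal would accept it.
+//
+//     var t T
+//     err := UnmarshalStrict([]byte("a: 1\nc: 3"), &t)
+//     // err reports that field c is not found in type T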
+
+// A Decoder reads and decodes YAML values from an input stream.
+type Decoder struct {
+ strict bool
+ parser *parser
+}
+
+// NewDecoder returns a new decoder that reads from r.
+//
+// The decoder introduces its own buffering and may read
+// data from r beyond the YAML values requested.
+func NewDecoder(r io.Reader) *Decoder {
+ return &Decoder{
+ parser: newParserFromReader(r),
+ }
+}
+
+// SetStrict sets whether strict decoding behaviour is enabled when
+// decoding items in the data (see UnmarshalStrict). By default, decoding is not strict.
+func (dec *Decoder) SetStrict(strict bool) {
+ dec.strict = strict
+}
+
+// Decode reads the next YAML-encoded value from its input
+// and stores it in the value pointed to by v.
+//
+// See the documentation for Unmarshal for details about the
+// conversion of YAML into a Go value.
+func (dec *Decoder) Decode(v interface{}) (err error) {
+ d := newDecoder(dec.strict)
+ defer handleErr(&err)
+ node := dec.parser.parse()
+ if node == nil {
+ return io.EOF
+ }
+ out := reflect.ValueOf(v)
+ if out.Kind() == reflect.Ptr && !out.IsNil() {
+ out = out.Elem()
+ }
+ d.unmarshal(node, out)
+ if len(d.terrors) > 0 {
+ return &TypeError{d.terrors}
+ }
+ return nil
+}
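+
+// An illustrative sketch: draining a multi-document stream until io.EOF.
+//
+//     dec := NewDecoder(r) // r is an assumed io.Reader of YAML documents
+//     for {
+//         var doc map[string]interface{}
+//         if err := dec.Decode(&doc); err == io.EOF {
+//             break
+//         } else if err != nil {
+//             return err
+//         }
+//         // use doc
+//     }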
+
+func unmarshal(in []byte, out interface{}, strict bool) (err error) {
+ defer handleErr(&err)
+ d := newDecoder(strict)
+ p := newParser(in)
+ defer p.destroy()
+ node := p.parse()
+ if node != nil {
+ v := reflect.ValueOf(out)
+ if v.Kind() == reflect.Ptr && !v.IsNil() {
+ v = v.Elem()
+ }
+ d.unmarshal(node, v)
+ }
+ if len(d.terrors) > 0 {
+ return &TypeError{d.terrors}
+ }
+ return nil
+}
+
+// Marshal serializes the value provided into a YAML document. The structure
+// of the generated document will reflect the structure of the value itself.
+// Maps and pointers (to struct, string, int, etc) are accepted as the in value.
+//
+// Struct fields are only marshalled if they are exported (have an upper case
+// first letter), and are marshalled using the field name lowercased as the
+// default key. Custom keys may be defined via the "yaml" name in the field
+// tag: the content preceding the first comma is used as the key, and the
+// following comma-separated options are used to tweak the marshalling process.
+// Conflicting names result in a runtime error.
+//
+// The field tag format accepted is:
+//
+// `(...) yaml:"[<key>][,<flag1>[,<flag2>]]" (...)`
+//
+// The following flags are currently supported:
+//
+// omitempty Only include the field if it's not set to the zero
+// value for the type or to empty slices or maps.
+// Zero valued structs will be omitted if all their public
+// fields are zero, unless they implement an IsZero
+// method (see the IsZeroer interface type), in which
+// case the field will be excluded if IsZero returns true.
+//
+// flow Marshal using a flow style (useful for structs,
+// sequences and maps).
+//
+// inline Inline the field, which must be a struct or a map,
+// causing all of its fields or keys to be processed as if
+// they were part of the outer struct. For maps, keys must
+// not conflict with the yaml keys of other struct fields.
+//
+// In addition, if the key is "-", the field is ignored.
+//
+// For example:
+//
+// type T struct {
+// F int `yaml:"a,omitempty"`
+// B int
+// }
+// yaml.Marshal(&T{B: 2}) // Returns "b: 2\n"
+// yaml.Marshal(&T{F: 1}) // Returns "a: 1\nb: 0\n"
+//
+func Marshal(in interface{}) (out []byte, err error) {
+ defer handleErr(&err)
+ e := newEncoder()
+ defer e.destroy()
+ e.marshalDoc("", reflect.ValueOf(in))
+ e.finish()
+ out = e.out
+ return
+}
+
+// An Encoder writes YAML values to an output stream.
+type Encoder struct {
+ encoder *encoder
+}
+
+// NewEncoder returns a new encoder that writes to w.
+// The Encoder should be closed after use to flush all data
+// to w.
+func NewEncoder(w io.Writer) *Encoder {
+ return &Encoder{
+ encoder: newEncoderWithWriter(w),
+ }
+}
+
+// Encode writes the YAML encoding of v to the stream.
+// If multiple items are encoded to the stream, the
+// second and subsequent documents will be preceded
+// with a "---" document separator, but the first will not.
+//
+// See the documentation for Marshal for details about the conversion of Go
+// values to YAML.
+func (e *Encoder) Encode(v interface{}) (err error) {
+ defer handleErr(&err)
+ e.encoder.marshalDoc("", reflect.ValueOf(v))
+ return nil
+}
+
+// Close closes the encoder by writing any remaining data.
+// It does not write a stream terminating string "...".
+func (e *Encoder) Close() (err error) {
+ defer handleErr(&err)
+ e.encoder.finish()
+ return nil
+}
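+
+// An illustrative sketch: two documents on one stream. The second Encode
+// emits a "---" separator before its document.
+//
+//     enc := NewEncoder(w) // w is an assumed io.Writer
+//     enc.Encode(map[string]int{"a": 1})
+//     enc.Encode(map[string]int{"b": 2})
+//     enc.Close() // output: "a: 1\n---\nb: 2\n"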
+
+func handleErr(err *error) {
+ if v := recover(); v != nil {
+ if e, ok := v.(yamlError); ok {
+ *err = e.err
+ } else {
+ panic(v)
+ }
+ }
+}
+
+type yamlError struct {
+ err error
+}
+
+func fail(err error) {
+ panic(yamlError{err})
+}
+
+func failf(format string, args ...interface{}) {
+ panic(yamlError{fmt.Errorf("yaml: "+format, args...)})
+}
+
+// A TypeError is returned by Unmarshal when one or more fields in
+// the YAML document cannot be properly decoded into the requested
+// types. When this error is returned, the value is still
+// unmarshaled partially.
+type TypeError struct {
+ Errors []string
+}
+
+func (e *TypeError) Error() string {
+ return fmt.Sprintf("yaml: unmarshal errors:\n %s", strings.Join(e.Errors, "\n "))
+}
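+
+// An illustrative sketch: a *TypeError leaves the decodable values set, so
+// callers may inspect the failures and keep the partial result.
+//
+//     var t T // the example type from Unmarshal's documentation
+//     if err := Unmarshal(data, &t); err != nil { // data is an assumed []byte
+//         if te, ok := err.(*TypeError); ok {
+//             _ = te.Errors // per-field messages; t holds what did decode
+//         }
+//     }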
+
+// --------------------------------------------------------------------------
+// Maintain a mapping of keys to structure field indexes
+
+// The code in this section was copied from mgo/bson.
+
+// structInfo holds details for the serialization of fields of
+// a given struct.
+type structInfo struct {
+ FieldsMap map[string]fieldInfo
+ FieldsList []fieldInfo
+
+ // InlineMap is the number of the field in the struct that
+ // contains an ,inline map, or -1 if there's none.
+ InlineMap int
+}
+
+type fieldInfo struct {
+ Key string
+ Num int
+ OmitEmpty bool
+ Flow bool
+ // Id holds the unique field identifier, so we can cheaply
+ // check for field duplicates without maintaining an extra map.
+ Id int
+
+ // Inline holds the field index if the field is part of an inlined struct.
+ Inline []int
+}
+
+var structMap = make(map[reflect.Type]*structInfo)
+var fieldMapMutex sync.RWMutex
+
+func getStructInfo(st reflect.Type) (*structInfo, error) {
+ fieldMapMutex.RLock()
+ sinfo, found := structMap[st]
+ fieldMapMutex.RUnlock()
+ if found {
+ return sinfo, nil
+ }
+
+ n := st.NumField()
+ fieldsMap := make(map[string]fieldInfo)
+ fieldsList := make([]fieldInfo, 0, n)
+ inlineMap := -1
+ for i := 0; i != n; i++ {
+ field := st.Field(i)
+ if field.PkgPath != "" && !field.Anonymous {
+ continue // Private field
+ }
+
+ info := fieldInfo{Num: i}
+
+ tag := field.Tag.Get("yaml")
+ if tag == "" && strings.Index(string(field.Tag), ":") < 0 {
+ tag = string(field.Tag)
+ }
+ if tag == "-" {
+ continue
+ }
+
+ inline := false
+ fields := strings.Split(tag, ",")
+ if len(fields) > 1 {
+ for _, flag := range fields[1:] {
+ switch flag {
+ case "omitempty":
+ info.OmitEmpty = true
+ case "flow":
+ info.Flow = true
+ case "inline":
+ inline = true
+ default:
+ return nil, fmt.Errorf("Unsupported flag %q in tag %q of type %s", flag, tag, st)
+ }
+ }
+ tag = fields[0]
+ }
+
+ if inline {
+ switch field.Type.Kind() {
+ case reflect.Map:
+ if inlineMap >= 0 {
+ return nil, errors.New("Multiple ,inline maps in struct " + st.String())
+ }
+ if field.Type.Key() != reflect.TypeOf("") {
+ return nil, errors.New("Option ,inline needs a map with string keys in struct " + st.String())
+ }
+ inlineMap = info.Num
+ case reflect.Struct:
+ sinfo, err := getStructInfo(field.Type)
+ if err != nil {
+ return nil, err
+ }
+ for _, finfo := range sinfo.FieldsList {
+ if _, found := fieldsMap[finfo.Key]; found {
+ msg := "Duplicated key '" + finfo.Key + "' in struct " + st.String()
+ return nil, errors.New(msg)
+ }
+ if finfo.Inline == nil {
+ finfo.Inline = []int{i, finfo.Num}
+ } else {
+ finfo.Inline = append([]int{i}, finfo.Inline...)
+ }
+ finfo.Id = len(fieldsList)
+ fieldsMap[finfo.Key] = finfo
+ fieldsList = append(fieldsList, finfo)
+ }
+ default:
+ //return nil, errors.New("Option ,inline needs a struct value or map field")
+ return nil, errors.New("Option ,inline needs a struct value field")
+ }
+ continue
+ }
+
+ if tag != "" {
+ info.Key = tag
+ } else {
+ info.Key = strings.ToLower(field.Name)
+ }
+
+ if _, found = fieldsMap[info.Key]; found {
+ msg := "Duplicated key '" + info.Key + "' in struct " + st.String()
+ return nil, errors.New(msg)
+ }
+
+ info.Id = len(fieldsList)
+ fieldsList = append(fieldsList, info)
+ fieldsMap[info.Key] = info
+ }
+
+ sinfo = &structInfo{
+ FieldsMap: fieldsMap,
+ FieldsList: fieldsList,
+ InlineMap: inlineMap,
+ }
+
+ fieldMapMutex.Lock()
+ structMap[st] = sinfo
+ fieldMapMutex.Unlock()
+ return sinfo, nil
+}
+
+// IsZeroer is used to check whether an object is zero to
+// determine whether it should be omitted when marshaling
+// with the omitempty flag. One notable implementation
+// is time.Time.
+type IsZeroer interface {
+ IsZero() bool
+}
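+
+// An illustrative sketch (the Window type is assumed for the example): a
+// struct whose zero test ignores an internal cache field, so omitempty
+// omits it whenever the visible dimensions are unset.
+//
+//     type Window struct {
+//         W, H  int
+//         cache []byte
+//     }
+//
+//     func (w Window) IsZero() bool { return w.W == 0 && w.H == 0 }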
+
+func isZero(v reflect.Value) bool {
+ kind := v.Kind()
+ if z, ok := v.Interface().(IsZeroer); ok {
+ if (kind == reflect.Ptr || kind == reflect.Interface) && v.IsNil() {
+ return true
+ }
+ return z.IsZero()
+ }
+ switch kind {
+ case reflect.String:
+ return len(v.String()) == 0
+ case reflect.Interface, reflect.Ptr:
+ return v.IsNil()
+ case reflect.Slice:
+ return v.Len() == 0
+ case reflect.Map:
+ return v.Len() == 0
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return v.Int() == 0
+ case reflect.Float32, reflect.Float64:
+ return v.Float() == 0
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return v.Uint() == 0
+ case reflect.Bool:
+ return !v.Bool()
+ case reflect.Struct:
+ vt := v.Type()
+ for i := v.NumField() - 1; i >= 0; i-- {
+ if vt.Field(i).PkgPath != "" {
+ continue // Private field
+ }
+ if !isZero(v.Field(i)) {
+ return false
+ }
+ }
+ return true
+ }
+ return false
+}
+
+// FutureLineWrap globally disables line wrapping when encoding long strings.
+// This is a temporary and thus deprecated method introduced to facilitate
+// migration towards v3, which offers more control of line lengths on
+// individual encodings, and has a default matching the behavior introduced
+// by this function.
+//
+// The default formatting of v2 was erroneously changed in v2.3.0 and reverted
+// in v2.4.0, at which point this function was introduced to help migration.
+func FutureLineWrap() {
+ disableLineWrapping = true
+}
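+
+// An illustrative sketch: call it once, before any encoding takes place.
+//
+//     func init() {
+//         FutureLineWrap() // long strings are then emitted on a single line
+//     }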
diff --git a/vendor/gopkg.in/yaml.v2/yamlh.go b/vendor/gopkg.in/yaml.v2/yamlh.go
new file mode 100644
index 0000000000..f6a9c8e34b
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/yamlh.go
@@ -0,0 +1,739 @@
+package yaml
+
+import (
+ "fmt"
+ "io"
+)
+
+// The version directive data.
+type yaml_version_directive_t struct {
+ major int8 // The major version number.
+ minor int8 // The minor version number.
+}
+
+// The tag directive data.
+type yaml_tag_directive_t struct {
+ handle []byte // The tag handle.
+ prefix []byte // The tag prefix.
+}
+
+type yaml_encoding_t int
+
+// The stream encoding.
+const (
+ // Let the parser choose the encoding.
+ yaml_ANY_ENCODING yaml_encoding_t = iota
+
+ yaml_UTF8_ENCODING // The default UTF-8 encoding.
+ yaml_UTF16LE_ENCODING // The UTF-16-LE encoding with BOM.
+ yaml_UTF16BE_ENCODING // The UTF-16-BE encoding with BOM.
+)
+
+type yaml_break_t int
+
+// Line break types.
+const (
+ // Let the parser choose the break type.
+ yaml_ANY_BREAK yaml_break_t = iota
+
+ yaml_CR_BREAK // Use CR for line breaks (Mac style).
+ yaml_LN_BREAK // Use LN for line breaks (Unix style).
+ yaml_CRLN_BREAK // Use CR LN for line breaks (DOS style).
+)
+
+type yaml_error_type_t int
+
+// Many bad things could happen with the parser and emitter.
+const (
+ // No error is produced.
+ yaml_NO_ERROR yaml_error_type_t = iota
+
+ yaml_MEMORY_ERROR // Cannot allocate or reallocate a block of memory.
+ yaml_READER_ERROR // Cannot read or decode the input stream.
+ yaml_SCANNER_ERROR // Cannot scan the input stream.
+ yaml_PARSER_ERROR // Cannot parse the input stream.
+ yaml_COMPOSER_ERROR // Cannot compose a YAML document.
+ yaml_WRITER_ERROR // Cannot write to the output stream.
+ yaml_EMITTER_ERROR // Cannot emit a YAML stream.
+)
+
+// The pointer position.
+type yaml_mark_t struct {
+ index int // The position index.
+ line int // The position line.
+ column int // The position column.
+}
+
+// Node Styles
+
+type yaml_style_t int8
+
+type yaml_scalar_style_t yaml_style_t
+
+// Scalar styles.
+const (
+ // Let the emitter choose the style.
+ yaml_ANY_SCALAR_STYLE yaml_scalar_style_t = iota
+
+ yaml_PLAIN_SCALAR_STYLE // The plain scalar style.
+ yaml_SINGLE_QUOTED_SCALAR_STYLE // The single-quoted scalar style.
+ yaml_DOUBLE_QUOTED_SCALAR_STYLE // The double-quoted scalar style.
+ yaml_LITERAL_SCALAR_STYLE // The literal scalar style.
+ yaml_FOLDED_SCALAR_STYLE // The folded scalar style.
+)
+
+type yaml_sequence_style_t yaml_style_t
+
+// Sequence styles.
+const (
+ // Let the emitter choose the style.
+ yaml_ANY_SEQUENCE_STYLE yaml_sequence_style_t = iota
+
+ yaml_BLOCK_SEQUENCE_STYLE // The block sequence style.
+ yaml_FLOW_SEQUENCE_STYLE // The flow sequence style.
+)
+
+type yaml_mapping_style_t yaml_style_t
+
+// Mapping styles.
+const (
+ // Let the emitter choose the style.
+ yaml_ANY_MAPPING_STYLE yaml_mapping_style_t = iota
+
+ yaml_BLOCK_MAPPING_STYLE // The block mapping style.
+ yaml_FLOW_MAPPING_STYLE // The flow mapping style.
+)
+
+// Tokens
+
+type yaml_token_type_t int
+
+// Token types.
+const (
+ // An empty token.
+ yaml_NO_TOKEN yaml_token_type_t = iota
+
+ yaml_STREAM_START_TOKEN // A STREAM-START token.
+ yaml_STREAM_END_TOKEN // A STREAM-END token.
+
+ yaml_VERSION_DIRECTIVE_TOKEN // A VERSION-DIRECTIVE token.
+ yaml_TAG_DIRECTIVE_TOKEN // A TAG-DIRECTIVE token.
+ yaml_DOCUMENT_START_TOKEN // A DOCUMENT-START token.
+ yaml_DOCUMENT_END_TOKEN // A DOCUMENT-END token.
+
+ yaml_BLOCK_SEQUENCE_START_TOKEN // A BLOCK-SEQUENCE-START token.
+ yaml_BLOCK_MAPPING_START_TOKEN // A BLOCK-MAPPING-START token.
+ yaml_BLOCK_END_TOKEN // A BLOCK-END token.
+
+ yaml_FLOW_SEQUENCE_START_TOKEN // A FLOW-SEQUENCE-START token.
+ yaml_FLOW_SEQUENCE_END_TOKEN // A FLOW-SEQUENCE-END token.
+ yaml_FLOW_MAPPING_START_TOKEN // A FLOW-MAPPING-START token.
+ yaml_FLOW_MAPPING_END_TOKEN // A FLOW-MAPPING-END token.
+
+ yaml_BLOCK_ENTRY_TOKEN // A BLOCK-ENTRY token.
+ yaml_FLOW_ENTRY_TOKEN // A FLOW-ENTRY token.
+ yaml_KEY_TOKEN // A KEY token.
+ yaml_VALUE_TOKEN // A VALUE token.
+
+ yaml_ALIAS_TOKEN // An ALIAS token.
+ yaml_ANCHOR_TOKEN // An ANCHOR token.
+ yaml_TAG_TOKEN // A TAG token.
+ yaml_SCALAR_TOKEN // A SCALAR token.
+)
+
+func (tt yaml_token_type_t) String() string {
+ switch tt {
+ case yaml_NO_TOKEN:
+ return "yaml_NO_TOKEN"
+ case yaml_STREAM_START_TOKEN:
+ return "yaml_STREAM_START_TOKEN"
+ case yaml_STREAM_END_TOKEN:
+ return "yaml_STREAM_END_TOKEN"
+ case yaml_VERSION_DIRECTIVE_TOKEN:
+ return "yaml_VERSION_DIRECTIVE_TOKEN"
+ case yaml_TAG_DIRECTIVE_TOKEN:
+ return "yaml_TAG_DIRECTIVE_TOKEN"
+ case yaml_DOCUMENT_START_TOKEN:
+ return "yaml_DOCUMENT_START_TOKEN"
+ case yaml_DOCUMENT_END_TOKEN:
+ return "yaml_DOCUMENT_END_TOKEN"
+ case yaml_BLOCK_SEQUENCE_START_TOKEN:
+ return "yaml_BLOCK_SEQUENCE_START_TOKEN"
+ case yaml_BLOCK_MAPPING_START_TOKEN:
+ return "yaml_BLOCK_MAPPING_START_TOKEN"
+ case yaml_BLOCK_END_TOKEN:
+ return "yaml_BLOCK_END_TOKEN"
+ case yaml_FLOW_SEQUENCE_START_TOKEN:
+ return "yaml_FLOW_SEQUENCE_START_TOKEN"
+ case yaml_FLOW_SEQUENCE_END_TOKEN:
+ return "yaml_FLOW_SEQUENCE_END_TOKEN"
+ case yaml_FLOW_MAPPING_START_TOKEN:
+ return "yaml_FLOW_MAPPING_START_TOKEN"
+ case yaml_FLOW_MAPPING_END_TOKEN:
+ return "yaml_FLOW_MAPPING_END_TOKEN"
+ case yaml_BLOCK_ENTRY_TOKEN:
+ return "yaml_BLOCK_ENTRY_TOKEN"
+ case yaml_FLOW_ENTRY_TOKEN:
+ return "yaml_FLOW_ENTRY_TOKEN"
+ case yaml_KEY_TOKEN:
+ return "yaml_KEY_TOKEN"
+ case yaml_VALUE_TOKEN:
+ return "yaml_VALUE_TOKEN"
+ case yaml_ALIAS_TOKEN:
+ return "yaml_ALIAS_TOKEN"
+ case yaml_ANCHOR_TOKEN:
+ return "yaml_ANCHOR_TOKEN"
+ case yaml_TAG_TOKEN:
+ return "yaml_TAG_TOKEN"
+ case yaml_SCALAR_TOKEN:
+ return "yaml_SCALAR_TOKEN"
+ }
+ return ""
+}
+
+// The token structure.
+type yaml_token_t struct {
+ // The token type.
+ typ yaml_token_type_t
+
+ // The start/end of the token.
+ start_mark, end_mark yaml_mark_t
+
+ // The stream encoding (for yaml_STREAM_START_TOKEN).
+ encoding yaml_encoding_t
+
+ // The alias/anchor/scalar value or tag/tag directive handle
+ // (for yaml_ALIAS_TOKEN, yaml_ANCHOR_TOKEN, yaml_SCALAR_TOKEN, yaml_TAG_TOKEN, yaml_TAG_DIRECTIVE_TOKEN).
+ value []byte
+
+ // The tag suffix (for yaml_TAG_TOKEN).
+ suffix []byte
+
+ // The tag directive prefix (for yaml_TAG_DIRECTIVE_TOKEN).
+ prefix []byte
+
+ // The scalar style (for yaml_SCALAR_TOKEN).
+ style yaml_scalar_style_t
+
+ // The version directive major/minor (for yaml_VERSION_DIRECTIVE_TOKEN).
+ major, minor int8
+}
+
+// Events
+
+type yaml_event_type_t int8
+
+// Event types.
+const (
+ // An empty event.
+ yaml_NO_EVENT yaml_event_type_t = iota
+
+ yaml_STREAM_START_EVENT // A STREAM-START event.
+ yaml_STREAM_END_EVENT // A STREAM-END event.
+ yaml_DOCUMENT_START_EVENT // A DOCUMENT-START event.
+ yaml_DOCUMENT_END_EVENT // A DOCUMENT-END event.
+ yaml_ALIAS_EVENT // An ALIAS event.
+ yaml_SCALAR_EVENT // A SCALAR event.
+ yaml_SEQUENCE_START_EVENT // A SEQUENCE-START event.
+ yaml_SEQUENCE_END_EVENT // A SEQUENCE-END event.
+ yaml_MAPPING_START_EVENT // A MAPPING-START event.
+ yaml_MAPPING_END_EVENT // A MAPPING-END event.
+)
+
+var eventStrings = []string{
+ yaml_NO_EVENT: "none",
+ yaml_STREAM_START_EVENT: "stream start",
+ yaml_STREAM_END_EVENT: "stream end",
+ yaml_DOCUMENT_START_EVENT: "document start",
+ yaml_DOCUMENT_END_EVENT: "document end",
+ yaml_ALIAS_EVENT: "alias",
+ yaml_SCALAR_EVENT: "scalar",
+ yaml_SEQUENCE_START_EVENT: "sequence start",
+ yaml_SEQUENCE_END_EVENT: "sequence end",
+ yaml_MAPPING_START_EVENT: "mapping start",
+ yaml_MAPPING_END_EVENT: "mapping end",
+}
+
+func (e yaml_event_type_t) String() string {
+ if e < 0 || int(e) >= len(eventStrings) {
+ return fmt.Sprintf("unknown event %d", e)
+ }
+ return eventStrings[e]
+}
+
+// The event structure.
+type yaml_event_t struct {
+
+ // The event type.
+ typ yaml_event_type_t
+
+ // The start and end of the event.
+ start_mark, end_mark yaml_mark_t
+
+ // The document encoding (for yaml_STREAM_START_EVENT).
+ encoding yaml_encoding_t
+
+ // The version directive (for yaml_DOCUMENT_START_EVENT).
+ version_directive *yaml_version_directive_t
+
+ // The list of tag directives (for yaml_DOCUMENT_START_EVENT).
+ tag_directives []yaml_tag_directive_t
+
+ // The anchor (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_ALIAS_EVENT).
+ anchor []byte
+
+ // The tag (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT).
+ tag []byte
+
+ // The scalar value (for yaml_SCALAR_EVENT).
+ value []byte
+
+ // Is the document start/end indicator implicit, or the tag optional?
+ // (for yaml_DOCUMENT_START_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_SCALAR_EVENT).
+ implicit bool
+
+ // Is the tag optional for any non-plain style? (for yaml_SCALAR_EVENT).
+ quoted_implicit bool
+
+ // The style (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT).
+ style yaml_style_t
+}
+
+func (e *yaml_event_t) scalar_style() yaml_scalar_style_t { return yaml_scalar_style_t(e.style) }
+func (e *yaml_event_t) sequence_style() yaml_sequence_style_t { return yaml_sequence_style_t(e.style) }
+func (e *yaml_event_t) mapping_style() yaml_mapping_style_t { return yaml_mapping_style_t(e.style) }
+
+// Nodes
+
+const (
+ yaml_NULL_TAG = "tag:yaml.org,2002:null" // The tag !!null with the only possible value: null.
+ yaml_BOOL_TAG = "tag:yaml.org,2002:bool" // The tag !!bool with the values: true and false.
+ yaml_STR_TAG = "tag:yaml.org,2002:str" // The tag !!str for string values.
+ yaml_INT_TAG = "tag:yaml.org,2002:int" // The tag !!int for integer values.
+ yaml_FLOAT_TAG = "tag:yaml.org,2002:float" // The tag !!float for float values.
+ yaml_TIMESTAMP_TAG = "tag:yaml.org,2002:timestamp" // The tag !!timestamp for date and time values.
+
+ yaml_SEQ_TAG = "tag:yaml.org,2002:seq" // The tag !!seq is used to denote sequences.
+ yaml_MAP_TAG = "tag:yaml.org,2002:map" // The tag !!map is used to denote mapping.
+
+ // Not in original libyaml.
+ yaml_BINARY_TAG = "tag:yaml.org,2002:binary"
+ yaml_MERGE_TAG = "tag:yaml.org,2002:merge"
+
+ yaml_DEFAULT_SCALAR_TAG = yaml_STR_TAG // The default scalar tag is !!str.
+ yaml_DEFAULT_SEQUENCE_TAG = yaml_SEQ_TAG // The default sequence tag is !!seq.
+ yaml_DEFAULT_MAPPING_TAG = yaml_MAP_TAG // The default mapping tag is !!map.
+)
+
+type yaml_node_type_t int
+
+// Node types.
+const (
+ // An empty node.
+ yaml_NO_NODE yaml_node_type_t = iota
+
+ yaml_SCALAR_NODE // A scalar node.
+ yaml_SEQUENCE_NODE // A sequence node.
+ yaml_MAPPING_NODE // A mapping node.
+)
+
+// An element of a sequence node.
+type yaml_node_item_t int
+
+// An element of a mapping node.
+type yaml_node_pair_t struct {
+ key int // The key of the element.
+ value int // The value of the element.
+}
+
+// The node structure.
+type yaml_node_t struct {
+ typ yaml_node_type_t // The node type.
+ tag []byte // The node tag.
+
+ // The node data.
+
+ // The scalar parameters (for yaml_SCALAR_NODE).
+ scalar struct {
+ value []byte // The scalar value.
+ length int // The length of the scalar value.
+ style yaml_scalar_style_t // The scalar style.
+ }
+
+ // The sequence parameters (for YAML_SEQUENCE_NODE).
+ sequence struct {
+ items_data []yaml_node_item_t // The stack of sequence items.
+ style yaml_sequence_style_t // The sequence style.
+ }
+
+ // The mapping parameters (for yaml_MAPPING_NODE).
+ mapping struct {
+ pairs_data []yaml_node_pair_t // The stack of mapping pairs (key, value).
+ pairs_start *yaml_node_pair_t // The beginning of the stack.
+ pairs_end *yaml_node_pair_t // The end of the stack.
+ pairs_top *yaml_node_pair_t // The top of the stack.
+ style yaml_mapping_style_t // The mapping style.
+ }
+
+ start_mark yaml_mark_t // The beginning of the node.
+ end_mark yaml_mark_t // The end of the node.
+
+}
+
+// The document structure.
+type yaml_document_t struct {
+
+ // The document nodes.
+ nodes []yaml_node_t
+
+ // The version directive.
+ version_directive *yaml_version_directive_t
+
+ // The list of tag directives.
+ tag_directives_data []yaml_tag_directive_t
+ tag_directives_start int // The beginning of the tag directives list.
+ tag_directives_end int // The end of the tag directives list.
+
+ start_implicit int // Is the document start indicator implicit?
+ end_implicit int // Is the document end indicator implicit?
+
+ // The start/end of the document.
+ start_mark, end_mark yaml_mark_t
+}
+
+// The prototype of a read handler.
+//
+// The read handler is called when the parser needs to read more bytes from the
+// source. The handler should write no more than len(buffer) bytes into
+// buffer and return the number of bytes written.
+//
+// On success, the handler returns n > 0 with a nil error. On failure it
+// returns a non-nil error. At the end of the input it returns io.EOF,
+// typically with n == 0.
+type yaml_read_handler_t func(parser *yaml_parser_t, buffer []byte) (n int, err error)
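+
+// An illustrative sketch (not part of upstream): a handler that feeds the
+// parser from its in-memory input slice, mirroring the string-input setup.
+//
+//     func string_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) {
+//         if parser.input_pos == len(parser.input) {
+//             return 0, io.EOF
+//         }
+//         n = copy(buffer, parser.input[parser.input_pos:])
+//         parser.input_pos += n
+//         return n, nil
+//     }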
+
+// This structure holds information about a potential simple key.
+type yaml_simple_key_t struct {
+ possible bool // Is a simple key possible?
+ required bool // Is a simple key required?
+ token_number int // The number of the token.
+ mark yaml_mark_t // The position mark.
+}
+
+// The states of the parser.
+type yaml_parser_state_t int
+
+const (
+ yaml_PARSE_STREAM_START_STATE yaml_parser_state_t = iota
+
+ yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE // Expect the beginning of an implicit document.
+ yaml_PARSE_DOCUMENT_START_STATE // Expect DOCUMENT-START.
+ yaml_PARSE_DOCUMENT_CONTENT_STATE // Expect the content of a document.
+ yaml_PARSE_DOCUMENT_END_STATE // Expect DOCUMENT-END.
+ yaml_PARSE_BLOCK_NODE_STATE // Expect a block node.
+ yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE // Expect a block node or indentless sequence.
+ yaml_PARSE_FLOW_NODE_STATE // Expect a flow node.
+ yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a block sequence.
+ yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE // Expect an entry of a block sequence.
+ yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE // Expect an entry of an indentless sequence.
+ yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping.
+ yaml_PARSE_BLOCK_MAPPING_KEY_STATE // Expect a block mapping key.
+ yaml_PARSE_BLOCK_MAPPING_VALUE_STATE // Expect a block mapping value.
+ yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a flow sequence.
+ yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE // Expect an entry of a flow sequence.
+ yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE // Expect a key of an ordered mapping.
+ yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE // Expect a value of an ordered mapping.
+ yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE // Expect the end of an ordered mapping entry.
+ yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping.
+ yaml_PARSE_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping.
+ yaml_PARSE_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping.
+ yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE // Expect an empty value of a flow mapping.
+ yaml_PARSE_END_STATE // Expect nothing.
+)
+
+func (ps yaml_parser_state_t) String() string {
+ switch ps {
+ case yaml_PARSE_STREAM_START_STATE:
+ return "yaml_PARSE_STREAM_START_STATE"
+ case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE:
+ return "yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE"
+ case yaml_PARSE_DOCUMENT_START_STATE:
+ return "yaml_PARSE_DOCUMENT_START_STATE"
+ case yaml_PARSE_DOCUMENT_CONTENT_STATE:
+ return "yaml_PARSE_DOCUMENT_CONTENT_STATE"
+ case yaml_PARSE_DOCUMENT_END_STATE:
+ return "yaml_PARSE_DOCUMENT_END_STATE"
+ case yaml_PARSE_BLOCK_NODE_STATE:
+ return "yaml_PARSE_BLOCK_NODE_STATE"
+ case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE:
+ return "yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE"
+ case yaml_PARSE_FLOW_NODE_STATE:
+ return "yaml_PARSE_FLOW_NODE_STATE"
+ case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE:
+ return "yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE"
+ case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE:
+ return "yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE"
+ case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE:
+ return "yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE"
+ case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE:
+ return "yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE"
+ case yaml_PARSE_BLOCK_MAPPING_KEY_STATE:
+ return "yaml_PARSE_BLOCK_MAPPING_KEY_STATE"
+ case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE:
+ return "yaml_PARSE_BLOCK_MAPPING_VALUE_STATE"
+ case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE:
+ return "yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE"
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE:
+ return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE"
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE:
+ return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE"
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE:
+ return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE"
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE:
+ return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE"
+ case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE:
+ return "yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE"
+ case yaml_PARSE_FLOW_MAPPING_KEY_STATE:
+ return "yaml_PARSE_FLOW_MAPPING_KEY_STATE"
+ case yaml_PARSE_FLOW_MAPPING_VALUE_STATE:
+ return "yaml_PARSE_FLOW_MAPPING_VALUE_STATE"
+ case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE:
+ return "yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE"
+ case yaml_PARSE_END_STATE:
+ return "yaml_PARSE_END_STATE"
+ }
+ return ""
+}
+
+// This structure holds aliases data.
+type yaml_alias_data_t struct {
+ anchor []byte // The anchor.
+ index int // The node id.
+ mark yaml_mark_t // The anchor mark.
+}
+
+// The parser structure.
+//
+// All members are internal. Manage the structure using the
+// yaml_parser_ family of functions.
+type yaml_parser_t struct {
+
+ // Error handling
+
+ error yaml_error_type_t // Error type.
+
+ problem string // Error description.
+
+ // The byte about which the problem occurred.
+ problem_offset int
+ problem_value int
+ problem_mark yaml_mark_t
+
+ // The error context.
+ context string
+ context_mark yaml_mark_t
+
+ // Reader stuff
+
+ read_handler yaml_read_handler_t // Read handler.
+
+ input_reader io.Reader // File input data.
+ input []byte // String input data.
+ input_pos int
+
+ eof bool // EOF flag
+
+ buffer []byte // The working buffer.
+ buffer_pos int // The current position of the buffer.
+
+ unread int // The number of unread characters in the buffer.
+
+ raw_buffer []byte // The raw buffer.
+ raw_buffer_pos int // The current position of the buffer.
+
+ encoding yaml_encoding_t // The input encoding.
+
+ offset int // The offset of the current position (in bytes).
+ mark yaml_mark_t // The mark of the current position.
+
+ // Scanner stuff
+
+ stream_start_produced bool // Have we started to scan the input stream?
+ stream_end_produced bool // Have we reached the end of the input stream?
+
+ flow_level int // The number of unclosed '[' and '{' indicators.
+
+ tokens []yaml_token_t // The tokens queue.
+ tokens_head int // The head of the tokens queue.
+ tokens_parsed int // The number of tokens fetched from the queue.
+ token_available bool // Does the tokens queue contain a token ready for dequeueing.
+
+ indent int // The current indentation level.
+ indents []int // The indentation levels stack.
+
+ simple_key_allowed bool // May a simple key occur at the current position?
+ simple_keys []yaml_simple_key_t // The stack of simple keys.
+ simple_keys_by_tok map[int]int // possible simple_key indexes indexed by token_number
+
+ // Parser stuff
+
+ state yaml_parser_state_t // The current parser state.
+ states []yaml_parser_state_t // The parser states stack.
+ marks []yaml_mark_t // The stack of marks.
+ tag_directives []yaml_tag_directive_t // The list of TAG directives.
+
+ // Dumper stuff
+
+ aliases []yaml_alias_data_t // The alias data.
+
+ document *yaml_document_t // The currently parsed document.
+}
+
+// Emitter Definitions
+
+// The prototype of a write handler.
+//
+// The write handler is called when the emitter needs to flush the accumulated
+// characters to the output. The handler should write all bytes of the
+// buffer to the output, returning a nil error on success and a non-nil
+// error on failure.
+//
+type yaml_write_handler_t func(emitter *yaml_emitter_t, buffer []byte) error
+
+type yaml_emitter_state_t int
+
+// The emitter states.
+const (
+ // Expect STREAM-START.
+ yaml_EMIT_STREAM_START_STATE yaml_emitter_state_t = iota
+
+ yaml_EMIT_FIRST_DOCUMENT_START_STATE // Expect the first DOCUMENT-START or STREAM-END.
+ yaml_EMIT_DOCUMENT_START_STATE // Expect DOCUMENT-START or STREAM-END.
+ yaml_EMIT_DOCUMENT_CONTENT_STATE // Expect the content of a document.
+ yaml_EMIT_DOCUMENT_END_STATE // Expect DOCUMENT-END.
+ yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a flow sequence.
+ yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE // Expect an item of a flow sequence.
+ yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping.
+ yaml_EMIT_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping.
+ yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a flow mapping.
+ yaml_EMIT_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping.
+ yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a block sequence.
+ yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE // Expect an item of a block sequence.
+ yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping.
+ yaml_EMIT_BLOCK_MAPPING_KEY_STATE // Expect the key of a block mapping.
+ yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a block mapping.
+ yaml_EMIT_BLOCK_MAPPING_VALUE_STATE // Expect a value of a block mapping.
+ yaml_EMIT_END_STATE // Expect nothing.
+)
+
+// The emitter structure.
+//
+// All members are internal. Manage the structure using the yaml_emitter_
+// family of functions.
+type yaml_emitter_t struct {
+
+ // Error handling
+
+ error yaml_error_type_t // Error type.
+ problem string // Error description.
+
+ // Writer stuff
+
+ write_handler yaml_write_handler_t // Write handler.
+
+ output_buffer *[]byte // String output data.
+ output_writer io.Writer // File output data.
+
+ buffer []byte // The working buffer.
+ buffer_pos int // The current position of the buffer.
+
+ raw_buffer []byte // The raw buffer.
+ raw_buffer_pos int // The current position of the buffer.
+
+ encoding yaml_encoding_t // The stream encoding.
+
+ // Emitter stuff
+
+ canonical bool // If the output is in the canonical style?
+ best_indent int // The number of indentation spaces.
+ best_width int // The preferred width of the output lines.
+ unicode bool // Allow unescaped non-ASCII characters?
+ line_break yaml_break_t // The preferred line break.
+
+ state yaml_emitter_state_t // The current emitter state.
+ states []yaml_emitter_state_t // The stack of states.
+
+ events []yaml_event_t // The event queue.
+ events_head int // The head of the event queue.
+
+ indents []int // The stack of indentation levels.
+
+ tag_directives []yaml_tag_directive_t // The list of tag directives.
+
+ indent int // The current indentation level.
+
+ flow_level int // The current flow level.
+
+ root_context bool // Is it the document root context?
+ sequence_context bool // Is it a sequence context?
+ mapping_context bool // Is it a mapping context?
+ simple_key_context bool // Is it a simple mapping key context?
+
+ line int // The current line.
+ column int // The current column.
+ whitespace bool // If the last character was a whitespace?
+ indention bool // If the last character was an indentation character (' ', '-', '?', ':')?
+ open_ended bool // If an explicit document end is required?
+
+ // Anchor analysis.
+ anchor_data struct {
+ anchor []byte // The anchor value.
+ alias bool // Is it an alias?
+ }
+
+ // Tag analysis.
+ tag_data struct {
+ handle []byte // The tag handle.
+ suffix []byte // The tag suffix.
+ }
+
+ // Scalar analysis.
+ scalar_data struct {
+ value []byte // The scalar value.
+ multiline bool // Does the scalar contain line breaks?
+ flow_plain_allowed bool // Can the scalar be expressed in the flow plain style?
+ block_plain_allowed bool // Can the scalar be expressed in the block plain style?
+ single_quoted_allowed bool // Can the scalar be expressed in the single quoted style?
+ block_allowed bool // Can the scalar be expressed in the literal or folded styles?
+ style yaml_scalar_style_t // The output style.
+ }
+
+ // Dumper stuff
+
+ opened bool // If the stream was already opened?
+ closed bool // If the stream was already closed?
+
+ // The information associated with the document nodes.
+ anchors *struct {
+ references int // The number of references.
+ anchor int // The anchor id.
+ serialized bool // If the node has been emitted?
+ }
+
+ last_anchor_id int // The last assigned anchor id.
+
+ document *yaml_document_t // The currently emitted document.
+}
diff --git a/vendor/gopkg.in/yaml.v2/yamlprivateh.go b/vendor/gopkg.in/yaml.v2/yamlprivateh.go
new file mode 100644
index 0000000000..8110ce3c37
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/yamlprivateh.go
@@ -0,0 +1,173 @@
+package yaml
+
+const (
+ // The size of the input raw buffer.
+ input_raw_buffer_size = 512
+
+ // The size of the input buffer.
+ // It should be possible to decode the whole raw buffer.
+ input_buffer_size = input_raw_buffer_size * 3
+
+ // The size of the output buffer.
+ output_buffer_size = 128
+
+ // The size of the output raw buffer.
+ // It should be possible to encode the whole output buffer.
+ output_raw_buffer_size = (output_buffer_size*2 + 2)
+
+ // The size of other stacks and queues.
+ initial_stack_size = 16
+ initial_queue_size = 16
+ initial_string_size = 16
+)
+
+// Check if the character at the specified position is an alphabetical
+// character, a digit, '_', or '-'.
+func is_alpha(b []byte, i int) bool {
+ return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'Z' || b[i] >= 'a' && b[i] <= 'z' || b[i] == '_' || b[i] == '-'
+}
+
+// Check if the character at the specified position is a digit.
+func is_digit(b []byte, i int) bool {
+ return b[i] >= '0' && b[i] <= '9'
+}
+
+// Get the value of a digit.
+func as_digit(b []byte, i int) int {
+ return int(b[i]) - '0'
+}
+
+// Check if the character at the specified position is a hex-digit.
+func is_hex(b []byte, i int) bool {
+ return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'F' || b[i] >= 'a' && b[i] <= 'f'
+}
+
+// Get the value of a hex-digit.
+func as_hex(b []byte, i int) int {
+ bi := b[i]
+ if bi >= 'A' && bi <= 'F' {
+ return int(bi) - 'A' + 10
+ }
+ if bi >= 'a' && bi <= 'f' {
+ return int(bi) - 'a' + 10
+ }
+ return int(bi) - '0'
+}
+
+// Check if the character is ASCII.
+func is_ascii(b []byte, i int) bool {
+ return b[i] <= 0x7F
+}
+
+// Check if the character at the start of the buffer can be printed unescaped.
+func is_printable(b []byte, i int) bool {
+ return ((b[i] == 0x0A) || // . == #x0A
+ (b[i] >= 0x20 && b[i] <= 0x7E) || // #x20 <= . <= #x7E
+ (b[i] == 0xC2 && b[i+1] >= 0xA0) || // #0xA0 <= . <= #xD7FF
+ (b[i] > 0xC2 && b[i] < 0xED) ||
+ (b[i] == 0xED && b[i+1] < 0xA0) ||
+ (b[i] == 0xEE) ||
+ (b[i] == 0xEF && // #xE000 <= . <= #xFFFD
+ !(b[i+1] == 0xBB && b[i+2] == 0xBF) && // && . != #xFEFF
+ !(b[i+1] == 0xBF && (b[i+2] == 0xBE || b[i+2] == 0xBF))))
+}
+
+// Check if the character at the specified position is NUL.
+func is_z(b []byte, i int) bool {
+ return b[i] == 0x00
+}
+
+// Check if the beginning of the buffer is a BOM.
+func is_bom(b []byte, i int) bool {
+ return b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF
+}
+
+// Check if the character at the specified position is space.
+func is_space(b []byte, i int) bool {
+ return b[i] == ' '
+}
+
+// Check if the character at the specified position is tab.
+func is_tab(b []byte, i int) bool {
+ return b[i] == '\t'
+}
+
+// Check if the character at the specified position is blank (space or tab).
+func is_blank(b []byte, i int) bool {
+ //return is_space(b, i) || is_tab(b, i)
+ return b[i] == ' ' || b[i] == '\t'
+}
+
+// Check if the character at the specified position is a line break.
+func is_break(b []byte, i int) bool {
+ return (b[i] == '\r' || // CR (#xD)
+ b[i] == '\n' || // LF (#xA)
+ b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9) // PS (#x2029)
+}
+
+func is_crlf(b []byte, i int) bool {
+ return b[i] == '\r' && b[i+1] == '\n'
+}
+
+// Check if the character is a line break or NUL.
+func is_breakz(b []byte, i int) bool {
+ //return is_break(b, i) || is_z(b, i)
+ return ( // is_break:
+ b[i] == '\r' || // CR (#xD)
+ b[i] == '\n' || // LF (#xA)
+ b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
+ // is_z:
+ b[i] == 0)
+}
+
+// Check if the character is a line break, space, or NUL.
+func is_spacez(b []byte, i int) bool {
+ //return is_space(b, i) || is_breakz(b, i)
+ return ( // is_space:
+ b[i] == ' ' ||
+ // is_breakz:
+ b[i] == '\r' || // CR (#xD)
+ b[i] == '\n' || // LF (#xA)
+ b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
+ b[i] == 0)
+}
+
+// Check if the character is a line break, space, tab, or NUL.
+func is_blankz(b []byte, i int) bool {
+ //return is_blank(b, i) || is_breakz(b, i)
+ return ( // is_blank:
+ b[i] == ' ' || b[i] == '\t' ||
+ // is_breakz:
+ b[i] == '\r' || // CR (#xD)
+ b[i] == '\n' || // LF (#xA)
+ b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
+ b[i] == 0)
+}
+
+// Determine the width of the character.
+func width(b byte) int {
+ // Don't replace these by a switch without first
+ // confirming that it is being inlined.
+ if b&0x80 == 0x00 {
+ return 1
+ }
+ if b&0xE0 == 0xC0 {
+ return 2
+ }
+ if b&0xF0 == 0xE0 {
+ return 3
+ }
+ if b&0xF8 == 0xF0 {
+ return 4
+ }
+ return 0
+}
diff --git a/vendor/howett.net/plist/.gitignore b/vendor/howett.net/plist/.gitignore
new file mode 100644
index 0000000000..3743b34676
--- /dev/null
+++ b/vendor/howett.net/plist/.gitignore
@@ -0,0 +1,16 @@
+# Binaries for programs and plugins
+*.exe
+*.exe~
+*.dll
+*.so
+*.dylib
+*.wasm
+
+# Test binary, built with `go test -c`
+*.test
+
+# Output of the go coverage tool, specifically when used with LiteIDE
+*.out
+
+# Dependency directories (remove the comment below to include it)
+# vendor/
diff --git a/vendor/howett.net/plist/.gitlab-ci.yml b/vendor/howett.net/plist/.gitlab-ci.yml
new file mode 100644
index 0000000000..11d6dbf73e
--- /dev/null
+++ b/vendor/howett.net/plist/.gitlab-ci.yml
@@ -0,0 +1,39 @@
+image: golang:alpine
+stages:
+ - test
+
+variables:
+ GO_PACKAGE: "howett.net/plist"
+
+before_script:
+ - "mkdir -p $(dirname $GOPATH/src/$GO_PACKAGE)"
+ - "ln -s $(pwd) $GOPATH/src/$GO_PACKAGE"
+ - "cd $GOPATH/src/$GO_PACKAGE"
+
+.template:go-test: &template-go-test
+ stage: test
+ script:
+ - go test
+
+go-test-cover:latest:
+ stage: test
+ script:
+ - go test -v -cover
+ coverage: '/^coverage: \d+\.\d+/'
+
+go-test-appengine:latest:
+ stage: test
+ script:
+ - go test -tags appengine
+
+go-test:1.6:
+ <<: *template-go-test
+ image: golang:1.6-alpine
+
+go-test:1.4:
+ <<: *template-go-test
+ image: golang:1.4-alpine
+
+go-test:1.2:
+ <<: *template-go-test
+ image: golang:1.2
diff --git a/vendor/howett.net/plist/LICENSE b/vendor/howett.net/plist/LICENSE
new file mode 100644
index 0000000000..9f6012f32b
--- /dev/null
+++ b/vendor/howett.net/plist/LICENSE
@@ -0,0 +1,58 @@
+Copyright (c) 2013, Dustin L. Howett. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+1. Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+The views and conclusions contained in the software and documentation are those
+of the authors and should not be interpreted as representing official policies,
+either expressed or implied, of the FreeBSD Project.
+
+--------------------------------------------------------------------------------
+Parts of this package were made available under the license covering
+the Go language and all attended core libraries. That license follows.
+--------------------------------------------------------------------------------
+
+Copyright (c) 2012 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/howett.net/plist/README.md b/vendor/howett.net/plist/README.md
new file mode 100644
index 0000000000..d751c062e1
--- /dev/null
+++ b/vendor/howett.net/plist/README.md
@@ -0,0 +1,21 @@
+# plist - A pure Go property list transcoder [![coverage report](https://gitlab.howett.net/go/plist/badges/main/coverage.svg)](https://gitlab.howett.net/go/plist/commits/main)
+## INSTALL
+```
+$ go get howett.net/plist
+```
+
+## FEATURES
+* Supports encoding/decoding property lists (Apple XML, Apple Binary, OpenStep and GNUstep) from/to arbitrary Go types
+
+## USE
+```go
+package main
+import (
+ "howett.net/plist"
+ "os"
+)
+func main() {
+ encoder := plist.NewEncoder(os.Stdout)
+ encoder.Encode(map[string]string{"hello": "world"})
+}
+```
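+
+A decoding sketch (illustrative; error handling elided). `Unmarshal` and
+`NewDecoder` are the read-side counterparts of the encoder above:
+```go
+package main
+import (
+    "bytes"
+    "fmt"
+    "howett.net/plist"
+)
+func main() {
+    var data map[string]string
+    decoder := plist.NewDecoder(bytes.NewReader([]byte(`{ "hello" = "world"; }`)))
+    decoder.Decode(&data)
+    fmt.Println(data["hello"]) // world
+}
+```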
diff --git a/vendor/howett.net/plist/bplist.go b/vendor/howett.net/plist/bplist.go
new file mode 100644
index 0000000000..962793a9f2
--- /dev/null
+++ b/vendor/howett.net/plist/bplist.go
@@ -0,0 +1,26 @@
+package plist
+
+type bplistTrailer struct {
+ Unused [5]uint8
+ SortVersion uint8
+ OffsetIntSize uint8
+ ObjectRefSize uint8
+ NumObjects uint64
+ TopObject uint64
+ OffsetTableOffset uint64
+}
+
+const (
+ bpTagNull uint8 = 0x00
+ bpTagBoolFalse = 0x08
+ bpTagBoolTrue = 0x09
+ bpTagInteger = 0x10
+ bpTagReal = 0x20
+ bpTagDate = 0x30
+ bpTagData = 0x40
+ bpTagASCIIString = 0x50
+ bpTagUTF16String = 0x60
+ bpTagUID = 0x80
+ bpTagArray = 0xA0
+ bpTagDictionary = 0xD0
+)
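+
+// An illustrative note (derived from the trailer above): a binary plist is
+// laid out as the 8-byte magic "bplist00", the encoded objects, an offset
+// table locating each object, and the fixed-size trailer at the end.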
diff --git a/vendor/howett.net/plist/bplist_generator.go b/vendor/howett.net/plist/bplist_generator.go
new file mode 100644
index 0000000000..09ab71b1f4
--- /dev/null
+++ b/vendor/howett.net/plist/bplist_generator.go
@@ -0,0 +1,303 @@
+package plist
+
+import (
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "io"
+ "time"
+ "unicode/utf16"
+)
+
+func bplistMinimumIntSize(n uint64) int {
+ switch {
+ case n <= uint64(0xff):
+ return 1
+ case n <= uint64(0xffff):
+ return 2
+ case n <= uint64(0xffffffff):
+ return 4
+ default:
+ return 8
+ }
+}
+
+func bplistValueShouldUnique(pval cfValue) bool {
+ switch pval.(type) {
+ case cfString, *cfNumber, *cfReal, cfDate, cfData:
+ return true
+ }
+ return false
+}
+
+type bplistGenerator struct {
+ writer *countedWriter
+ objmap map[interface{}]uint64 // maps cfValue.hash()es to object locations
+ objtable []cfValue
+ trailer bplistTrailer
+}
+
+func (p *bplistGenerator) flattenPlistValue(pval cfValue) {
+ key := pval.hash()
+ if bplistValueShouldUnique(pval) {
+ if _, ok := p.objmap[key]; ok {
+ return
+ }
+ }
+
+ p.objmap[key] = uint64(len(p.objtable))
+ p.objtable = append(p.objtable, pval)
+
+ switch pval := pval.(type) {
+ case *cfDictionary:
+ pval.sort()
+ for _, k := range pval.keys {
+ p.flattenPlistValue(cfString(k))
+ }
+ for _, v := range pval.values {
+ p.flattenPlistValue(v)
+ }
+ case *cfArray:
+ for _, v := range pval.values {
+ p.flattenPlistValue(v)
+ }
+ }
+}
+
+func (p *bplistGenerator) indexForPlistValue(pval cfValue) (uint64, bool) {
+ v, ok := p.objmap[pval.hash()]
+ return v, ok
+}
+
+func (p *bplistGenerator) generateDocument(root cfValue) {
+ p.objtable = make([]cfValue, 0, 16)
+ p.objmap = make(map[interface{}]uint64)
+ p.flattenPlistValue(root)
+
+ p.trailer.NumObjects = uint64(len(p.objtable))
+ p.trailer.ObjectRefSize = uint8(bplistMinimumIntSize(p.trailer.NumObjects))
+
+ p.writer.Write([]byte("bplist00"))
+
+ offtable := make([]uint64, p.trailer.NumObjects)
+ for i, pval := range p.objtable {
+ offtable[i] = uint64(p.writer.BytesWritten())
+ p.writePlistValue(pval)
+ }
+
+ p.trailer.OffsetIntSize = uint8(bplistMinimumIntSize(uint64(p.writer.BytesWritten())))
+ p.trailer.TopObject = p.objmap[root.hash()]
+ p.trailer.OffsetTableOffset = uint64(p.writer.BytesWritten())
+
+ for _, offset := range offtable {
+ p.writeSizedInt(offset, int(p.trailer.OffsetIntSize))
+ }
+
+ binary.Write(p.writer, binary.BigEndian, p.trailer)
+}
+
+func (p *bplistGenerator) writePlistValue(pval cfValue) {
+ if pval == nil {
+ return
+ }
+
+ switch pval := pval.(type) {
+ case *cfDictionary:
+ p.writeDictionaryTag(pval)
+ case *cfArray:
+ p.writeArrayTag(pval.values)
+ case cfString:
+ p.writeStringTag(string(pval))
+ case *cfNumber:
+ p.writeIntTag(pval.signed, pval.value)
+ case *cfReal:
+ if pval.wide {
+ p.writeRealTag(pval.value, 64)
+ } else {
+ p.writeRealTag(pval.value, 32)
+ }
+ case cfBoolean:
+ p.writeBoolTag(bool(pval))
+ case cfData:
+ p.writeDataTag([]byte(pval))
+ case cfDate:
+ p.writeDateTag(time.Time(pval))
+ case cfUID:
+ p.writeUIDTag(UID(pval))
+ default:
+ panic(fmt.Errorf("unknown plist type %t", pval))
+ }
+}
+
+func (p *bplistGenerator) writeSizedInt(n uint64, nbytes int) {
+ var val interface{}
+ switch nbytes {
+ case 1:
+ val = uint8(n)
+ case 2:
+ val = uint16(n)
+ case 4:
+ val = uint32(n)
+ case 8:
+ val = n
+ default:
+ panic(errors.New("illegal integer size"))
+ }
+ binary.Write(p.writer, binary.BigEndian, val)
+}
+
+func (p *bplistGenerator) writeBoolTag(v bool) {
+ tag := uint8(bpTagBoolFalse)
+ if v {
+ tag = bpTagBoolTrue
+ }
+ binary.Write(p.writer, binary.BigEndian, tag)
+}
+
+func (p *bplistGenerator) writeIntTag(signed bool, n uint64) {
+ var tag uint8
+ var val interface{}
+ switch {
+ case n <= uint64(0xff):
+ val = uint8(n)
+ tag = bpTagInteger | 0x0
+ case n <= uint64(0xffff):
+ val = uint16(n)
+ tag = bpTagInteger | 0x1
+ case n <= uint64(0xffffffff):
+ val = uint32(n)
+ tag = bpTagInteger | 0x2
+ case n > uint64(0x7fffffffffffffff) && !signed:
+ // 64-bit values are always *signed* in format 00.
+ // Any unsigned value that doesn't intersect with the signed
+ // range must be sign-extended and stored as a SInt128
+ val = n
+ tag = bpTagInteger | 0x4
+ default:
+ val = n
+ tag = bpTagInteger | 0x3
+ }
+
+ binary.Write(p.writer, binary.BigEndian, tag)
+ if tag&0xF == 0x4 {
+ // SInt128; in the absence of true 128-bit integers in Go,
+ // we'll just fake the top half. We only got here because
+ // we had an unsigned 64-bit int that didn't fit,
+ // so sign extend it with zeroes.
+ binary.Write(p.writer, binary.BigEndian, uint64(0))
+ }
+ binary.Write(p.writer, binary.BigEndian, val)
+}
+
+func (p *bplistGenerator) writeUIDTag(u UID) {
+ nbytes := bplistMinimumIntSize(uint64(u))
+ tag := uint8(bpTagUID | (nbytes - 1))
+
+ binary.Write(p.writer, binary.BigEndian, tag)
+ p.writeSizedInt(uint64(u), nbytes)
+}
+
+func (p *bplistGenerator) writeRealTag(n float64, bits int) {
+ var tag uint8 = bpTagReal | 0x3
+ var val interface{} = n
+ if bits == 32 {
+ val = float32(n)
+ tag = bpTagReal | 0x2
+ }
+
+ binary.Write(p.writer, binary.BigEndian, tag)
+ binary.Write(p.writer, binary.BigEndian, val)
+}
+
+func (p *bplistGenerator) writeDateTag(t time.Time) {
+ tag := uint8(bpTagDate) | 0x3
+ val := float64(t.In(time.UTC).UnixNano()) / float64(time.Second)
+ val -= 978307200 // Adjust to Apple Epoch
+
+ binary.Write(p.writer, binary.BigEndian, tag)
+ binary.Write(p.writer, binary.BigEndian, val)
+}
+
+func (p *bplistGenerator) writeCountedTag(tag uint8, count uint64) {
+ marker := tag
+ if count >= 0xF {
+ marker |= 0xF
+ } else {
+ marker |= uint8(count)
+ }
+
+ binary.Write(p.writer, binary.BigEndian, marker)
+
+ if count >= 0xF {
+ p.writeIntTag(false, count)
+ }
+}
+
+func (p *bplistGenerator) writeDataTag(data []byte) {
+ p.writeCountedTag(bpTagData, uint64(len(data)))
+ binary.Write(p.writer, binary.BigEndian, data)
+}
+
+func (p *bplistGenerator) writeStringTag(str string) {
+ for _, r := range str {
+ if r > 0x7F {
+ utf16Runes := utf16.Encode([]rune(str))
+ p.writeCountedTag(bpTagUTF16String, uint64(len(utf16Runes)))
+ binary.Write(p.writer, binary.BigEndian, utf16Runes)
+ return
+ }
+ }
+
+ p.writeCountedTag(bpTagASCIIString, uint64(len(str)))
+ binary.Write(p.writer, binary.BigEndian, []byte(str))
+}
+
+func (p *bplistGenerator) writeDictionaryTag(dict *cfDictionary) {
+ // assumption: sorted already; flattenPlistValue did this.
+ cnt := len(dict.keys)
+ p.writeCountedTag(bpTagDictionary, uint64(cnt))
+ vals := make([]uint64, cnt*2)
+ for i, k := range dict.keys {
+ // invariant: keys have already been "uniqued" (as cfStrings)
+ keyIdx, ok := p.objmap[cfString(k).hash()]
+ if !ok {
+ panic(errors.New("failed to find key " + k + " in object map during serialization"))
+ }
+ vals[i] = keyIdx
+ }
+
+ for i, v := range dict.values {
+ // invariant: values have already been "uniqued"
+ objIdx, ok := p.indexForPlistValue(v)
+ if !ok {
+ panic(errors.New("failed to find value in object map during serialization"))
+ }
+ vals[i+cnt] = objIdx
+ }
+
+ for _, v := range vals {
+ p.writeSizedInt(v, int(p.trailer.ObjectRefSize))
+ }
+}
+
+func (p *bplistGenerator) writeArrayTag(arr []cfValue) {
+ p.writeCountedTag(bpTagArray, uint64(len(arr)))
+ for _, v := range arr {
+ objIdx, ok := p.indexForPlistValue(v)
+ if !ok {
+ panic(errors.New("failed to find value in object map during serialization"))
+ }
+
+ p.writeSizedInt(objIdx, int(p.trailer.ObjectRefSize))
+ }
+}
+
+func (p *bplistGenerator) Indent(i string) {
+ // There's nothing to indent.
+}
+
+func newBplistGenerator(w io.Writer) *bplistGenerator {
+ return &bplistGenerator{
+ writer: &countedWriter{Writer: mustWriter{w}},
+ }
+}
diff --git a/vendor/howett.net/plist/bplist_parser.go b/vendor/howett.net/plist/bplist_parser.go
new file mode 100644
index 0000000000..1825b570be
--- /dev/null
+++ b/vendor/howett.net/plist/bplist_parser.go
@@ -0,0 +1,353 @@
+package plist
+
+import (
+ "bytes"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "math"
+ "runtime"
+ "time"
+ "unicode/utf16"
+)
+
+const (
+ signedHighBits = 0xFFFFFFFFFFFFFFFF
+)
+
+type offset uint64
+
+type bplistParser struct {
+ buffer []byte
+
+ reader io.ReadSeeker
+ version int
+ objects []cfValue // object ID to object
+ trailer bplistTrailer
+ trailerOffset uint64
+
+ containerStack []offset // slice of object offsets; manipulated during container deserialization
+}
+
+func (p *bplistParser) validateDocumentTrailer() {
+ if p.trailer.OffsetTableOffset >= p.trailerOffset {
+ panic(fmt.Errorf("offset table beyond beginning of trailer (0x%x, trailer@0x%x)", p.trailer.OffsetTableOffset, p.trailerOffset))
+ }
+
+ if p.trailer.OffsetTableOffset < 9 {
+ panic(fmt.Errorf("offset table begins inside header (0x%x)", p.trailer.OffsetTableOffset))
+ }
+
+ if p.trailerOffset > (p.trailer.NumObjects*uint64(p.trailer.OffsetIntSize))+p.trailer.OffsetTableOffset {
+ panic(errors.New("garbage between offset table and trailer"))
+ }
+
+ if p.trailer.OffsetTableOffset+(uint64(p.trailer.OffsetIntSize)*p.trailer.NumObjects) > p.trailerOffset {
+ panic(errors.New("offset table isn't long enough to address every object"))
+ }
+
+ maxObjectRef := uint64(1) << (8 * p.trailer.ObjectRefSize)
+ if p.trailer.NumObjects > maxObjectRef {
+ panic(fmt.Errorf("more objects (%v) than object ref size (%v bytes) can support", p.trailer.NumObjects, p.trailer.ObjectRefSize))
+ }
+
+ if p.trailer.OffsetIntSize < uint8(8) && (uint64(1)<<(8*p.trailer.OffsetIntSize)) <= p.trailer.OffsetTableOffset {
+ panic(errors.New("offset size isn't big enough to address entire file"))
+ }
+
+ if p.trailer.TopObject >= p.trailer.NumObjects {
+ panic(fmt.Errorf("top object #%d is out of range (only %d exist)", p.trailer.TopObject, p.trailer.NumObjects))
+ }
+}
+
+func (p *bplistParser) parseDocument() (pval cfValue, parseError error) {
+ defer func() {
+ if r := recover(); r != nil {
+ if _, ok := r.(runtime.Error); ok {
+ panic(r)
+ }
+
+ parseError = plistParseError{"binary", r.(error)}
+ }
+ }()
+
+ p.buffer, _ = ioutil.ReadAll(p.reader)
+
+ l := len(p.buffer)
+ if l < 40 {
+ panic(errors.New("not enough data"))
+ }
+
+ if !bytes.Equal(p.buffer[0:6], []byte{'b', 'p', 'l', 'i', 's', 't'}) {
+ panic(errors.New("incomprehensible magic"))
+ }
+
+ p.version = int(((p.buffer[6] - '0') * 10) + (p.buffer[7] - '0'))
+
+ if p.version > 1 {
+ panic(fmt.Errorf("unexpected version %d", p.version))
+ }
+
+ p.trailerOffset = uint64(l - 32)
+ p.trailer = bplistTrailer{
+ SortVersion: p.buffer[p.trailerOffset+5],
+ OffsetIntSize: p.buffer[p.trailerOffset+6],
+ ObjectRefSize: p.buffer[p.trailerOffset+7],
+ NumObjects: binary.BigEndian.Uint64(p.buffer[p.trailerOffset+8:]),
+ TopObject: binary.BigEndian.Uint64(p.buffer[p.trailerOffset+16:]),
+ OffsetTableOffset: binary.BigEndian.Uint64(p.buffer[p.trailerOffset+24:]),
+ }
+
+ p.validateDocumentTrailer()
+
+ // INVARIANTS:
+ // - Entire offset table is before trailer
+ // - Offset table begins after header
+ // - Offset table can address entire document
+ // - Object IDs are big enough to support the number of objects in this plist
+ // - Top object is in range
+
+ p.objects = make([]cfValue, p.trailer.NumObjects)
+
+ pval = p.objectAtIndex(p.trailer.TopObject)
+ return
+}
+
+// parseSizedInteger returns a 128-bit integer as low64, high64
+func (p *bplistParser) parseSizedInteger(off offset, nbytes int) (lo uint64, hi uint64, newOffset offset) {
+ // Per comments in CoreFoundation, format version 00 requires that all
+ // 1, 2 or 4-byte integers be interpreted as unsigned. 8-byte integers are
+ // signed (always?) and therefore must be sign extended here.
+ // negative 1, 2, or 4-byte integers are always emitted as 64-bit.
+ switch nbytes {
+ case 1:
+ lo, hi = uint64(p.buffer[off]), 0
+ case 2:
+ lo, hi = uint64(binary.BigEndian.Uint16(p.buffer[off:])), 0
+ case 4:
+ lo, hi = uint64(binary.BigEndian.Uint32(p.buffer[off:])), 0
+ case 8:
+ lo = binary.BigEndian.Uint64(p.buffer[off:])
+ if p.buffer[off]&0x80 != 0 {
+ // sign extend if lo is signed
+ hi = signedHighBits
+ }
+ case 16:
+ lo, hi = binary.BigEndian.Uint64(p.buffer[off+8:]), binary.BigEndian.Uint64(p.buffer[off:])
+ default:
+ panic(errors.New("illegal integer size"))
+ }
+ newOffset = off + offset(nbytes)
+ return
+}
+
+func (p *bplistParser) parseObjectRefAtOffset(off offset) (uint64, offset) {
+ oid, _, next := p.parseSizedInteger(off, int(p.trailer.ObjectRefSize))
+ return oid, next
+}
+
+func (p *bplistParser) parseOffsetAtOffset(off offset) (offset, offset) {
+ parsedOffset, _, next := p.parseSizedInteger(off, int(p.trailer.OffsetIntSize))
+ return offset(parsedOffset), next
+}
+
+func (p *bplistParser) objectAtIndex(index uint64) cfValue {
+ if index >= p.trailer.NumObjects {
+ panic(fmt.Errorf("invalid object#%d (max %d)", index, p.trailer.NumObjects))
+ }
+
+ if pval := p.objects[index]; pval != nil {
+ return pval
+ }
+
+ off, _ := p.parseOffsetAtOffset(offset(p.trailer.OffsetTableOffset + (index * uint64(p.trailer.OffsetIntSize))))
+ if off > offset(p.trailer.OffsetTableOffset-1) {
+ panic(fmt.Errorf("object#%d starts beyond beginning of object table (0x%x, table@0x%x)", index, off, p.trailer.OffsetTableOffset))
+ }
+
+ pval := p.parseTagAtOffset(off)
+ p.objects[index] = pval
+ return pval
+}
+
+func (p *bplistParser) pushNestedObject(off offset) {
+ for _, v := range p.containerStack {
+ if v == off {
+ p.panicNestedObject(off)
+ }
+ }
+ p.containerStack = append(p.containerStack, off)
+}
+
+func (p *bplistParser) panicNestedObject(off offset) {
+ ids := ""
+ for _, v := range p.containerStack {
+ ids += fmt.Sprintf("0x%x > ", v)
+ }
+
+ // %s0x%x: ids above ends with " > "
+ panic(fmt.Errorf("self-referential collection@0x%x (%s0x%x) cannot be deserialized", off, ids, off))
+}
+
+func (p *bplistParser) popNestedObject() {
+ p.containerStack = p.containerStack[:len(p.containerStack)-1]
+}
+
+func (p *bplistParser) parseTagAtOffset(off offset) cfValue {
+ tag := p.buffer[off]
+
+ switch tag & 0xF0 {
+ case bpTagNull:
+ switch tag & 0x0F {
+ case bpTagBoolTrue, bpTagBoolFalse:
+ return cfBoolean(tag == bpTagBoolTrue)
+ }
+ case bpTagInteger:
+ lo, hi, _ := p.parseIntegerAtOffset(off)
+ return &cfNumber{
+ signed: hi == signedHighBits, // a signed integer is stored as a 128-bit integer with the top 64 bits set
+ value: lo,
+ }
+ case bpTagReal:
+ nbytes := 1 << (tag & 0x0F)
+ switch nbytes {
+ case 4:
+ bits := binary.BigEndian.Uint32(p.buffer[off+1:])
+ return &cfReal{wide: false, value: float64(math.Float32frombits(bits))}
+ case 8:
+ bits := binary.BigEndian.Uint64(p.buffer[off+1:])
+ return &cfReal{wide: true, value: math.Float64frombits(bits)}
+ }
+ panic(errors.New("illegal float size"))
+ case bpTagDate:
+ bits := binary.BigEndian.Uint64(p.buffer[off+1:])
+ val := math.Float64frombits(bits)
+
+ // Apple Epoch is 20010101000000Z
+ // Adjust for UNIX Time
+ val += 978307200
+
+ sec, fsec := math.Modf(val)
+ time := time.Unix(int64(sec), int64(fsec*float64(time.Second))).In(time.UTC)
+ return cfDate(time)
+ case bpTagData:
+ data := p.parseDataAtOffset(off)
+ return cfData(data)
+ case bpTagASCIIString:
+ str := p.parseASCIIStringAtOffset(off)
+ return cfString(str)
+ case bpTagUTF16String:
+ str := p.parseUTF16StringAtOffset(off)
+ return cfString(str)
+ case bpTagUID: // Somehow different than int: low half is nbytes - 1 instead of log2(nbytes)
+ lo, _, _ := p.parseSizedInteger(off+1, int(tag&0xF)+1)
+ return cfUID(lo)
+ case bpTagDictionary:
+ return p.parseDictionaryAtOffset(off)
+ case bpTagArray:
+ return p.parseArrayAtOffset(off)
+ }
+ panic(fmt.Errorf("unexpected atom 0x%2.02x at offset 0x%x", tag, off))
+}
+
+func (p *bplistParser) parseIntegerAtOffset(off offset) (uint64, uint64, offset) {
+ tag := p.buffer[off]
+ return p.parseSizedInteger(off+1, 1<<(tag&0xF))
+}
+
+func (p *bplistParser) countForTagAtOffset(off offset) (uint64, offset) {
+ tag := p.buffer[off]
+ cnt := uint64(tag & 0x0F)
+ if cnt == 0xF {
+ cnt, _, off = p.parseIntegerAtOffset(off + 1)
+ return cnt, off
+ }
+ return cnt, off + 1
+}
+
+func (p *bplistParser) parseDataAtOffset(off offset) []byte {
+ len, start := p.countForTagAtOffset(off)
+ if start+offset(len) > offset(p.trailer.OffsetTableOffset) {
+ panic(fmt.Errorf("data@0x%x too long (%v bytes, max is %v)", off, len, p.trailer.OffsetTableOffset-uint64(start)))
+ }
+ return p.buffer[start : start+offset(len)]
+}
+
+func (p *bplistParser) parseASCIIStringAtOffset(off offset) string {
+ len, start := p.countForTagAtOffset(off)
+ if start+offset(len) > offset(p.trailer.OffsetTableOffset) {
+ panic(fmt.Errorf("ascii string@0x%x too long (%v bytes, max is %v)", off, len, p.trailer.OffsetTableOffset-uint64(start)))
+ }
+
+ return zeroCopy8BitString(p.buffer, int(start), int(len))
+}
+
+func (p *bplistParser) parseUTF16StringAtOffset(off offset) string {
+ len, start := p.countForTagAtOffset(off)
+ bytes := len * 2
+ if start+offset(bytes) > offset(p.trailer.OffsetTableOffset) {
+ panic(fmt.Errorf("utf16 string@0x%x too long (%v bytes, max is %v)", off, bytes, p.trailer.OffsetTableOffset-uint64(start)))
+ }
+
+ u16s := make([]uint16, len)
+ for i := offset(0); i < offset(len); i++ {
+ u16s[i] = binary.BigEndian.Uint16(p.buffer[start+(i*2):])
+ }
+ runes := utf16.Decode(u16s)
+ return string(runes)
+}
+
+func (p *bplistParser) parseObjectListAtOffset(off offset, count uint64) []cfValue {
+ if off+offset(count*uint64(p.trailer.ObjectRefSize)) > offset(p.trailer.OffsetTableOffset) {
+ panic(fmt.Errorf("list@0x%x length (%v) puts its end beyond the offset table at 0x%x", off, count, p.trailer.OffsetTableOffset))
+ }
+ objects := make([]cfValue, count)
+
+ next := off
+ var oid uint64
+ for i := uint64(0); i < count; i++ {
+ oid, next = p.parseObjectRefAtOffset(next)
+ objects[i] = p.objectAtIndex(oid)
+ }
+
+ return objects
+}
+
+func (p *bplistParser) parseDictionaryAtOffset(off offset) *cfDictionary {
+ p.pushNestedObject(off)
+ defer p.popNestedObject()
+
+ // a dictionary is an object list of [key key key val val val]
+ cnt, start := p.countForTagAtOffset(off)
+ objects := p.parseObjectListAtOffset(start, cnt*2)
+
+ keys := make([]string, cnt)
+ for i := uint64(0); i < cnt; i++ {
+ if str, ok := objects[i].(cfString); ok {
+ keys[i] = string(str)
+ } else {
+ panic(fmt.Errorf("dictionary@0x%x contains non-string key at index %d", off, i))
+ }
+ }
+
+ return &cfDictionary{
+ keys: keys,
+ values: objects[cnt:],
+ }
+}
+
+func (p *bplistParser) parseArrayAtOffset(off offset) *cfArray {
+ p.pushNestedObject(off)
+ defer p.popNestedObject()
+
+ // an array is just an object list
+ cnt, start := p.countForTagAtOffset(off)
+ return &cfArray{p.parseObjectListAtOffset(start, cnt)}
+}
+
+func newBplistParser(r io.ReadSeeker) *bplistParser {
+ return &bplistParser{reader: r}
+}
diff --git a/vendor/howett.net/plist/decode.go b/vendor/howett.net/plist/decode.go
new file mode 100644
index 0000000000..4c64667715
--- /dev/null
+++ b/vendor/howett.net/plist/decode.go
@@ -0,0 +1,119 @@
+package plist
+
+import (
+ "bytes"
+ "io"
+ "reflect"
+ "runtime"
+)
+
+type parser interface {
+ parseDocument() (cfValue, error)
+}
+
+// A Decoder reads a property list from an input stream.
+type Decoder struct {
+ // the format of the most-recently-decoded property list
+ Format int
+
+ reader io.ReadSeeker
+ lax bool
+}
+
+// Decode works like Unmarshal, except it reads the decoder stream to find property list elements.
+//
+// After Decoding, the Decoder's Format field will be set to one of the plist format constants.
+func (p *Decoder) Decode(v interface{}) (err error) {
+ defer func() {
+ if r := recover(); r != nil {
+ if _, ok := r.(runtime.Error); ok {
+ panic(r)
+ }
+ err = r.(error)
+ }
+ }()
+
+ header := make([]byte, 6)
+ p.reader.Read(header)
+ p.reader.Seek(0, 0)
+
+ var parser parser
+ var pval cfValue
+ if bytes.Equal(header, []byte("bplist")) {
+ parser = newBplistParser(p.reader)
+ pval, err = parser.parseDocument()
+ if err != nil {
+ // Had a bplist header, but still got an error: we have to die here.
+ return err
+ }
+ p.Format = BinaryFormat
+ } else {
+ parser = newXMLPlistParser(p.reader)
+ pval, err = parser.parseDocument()
+ if _, ok := err.(invalidPlistError); ok {
+ // Rewind: the XML parser might have exhausted the file.
+ p.reader.Seek(0, 0)
+ // We don't use parser here because we want the textPlistParser type
+ tp := newTextPlistParser(p.reader)
+ pval, err = tp.parseDocument()
+ if err != nil {
+ return err
+ }
+ p.Format = tp.format
+ if p.Format == OpenStepFormat {
+ // OpenStep property lists can only store strings,
+ // so we have to turn on lax mode here for the unmarshal step later.
+ p.lax = true
+ }
+ } else {
+ if err != nil {
+ return err
+ }
+ p.Format = XMLFormat
+ }
+ }
+
+ p.unmarshal(pval, reflect.ValueOf(v))
+ return
+}
+
+// NewDecoder returns a Decoder that reads property list elements from a stream reader, r.
+// NewDecoder requires a Seekable stream for the purposes of file type detection.
+func NewDecoder(r io.ReadSeeker) *Decoder {
+ return &Decoder{Format: InvalidFormat, reader: r, lax: false}
+}
+
+// Unmarshal parses a property list document and stores the result in the value pointed to by v.
+//
+// Unmarshal uses the inverse of the type encodings that Marshal uses, allocating heap-borne types as necessary.
+//
+// When given a nil pointer, Unmarshal allocates a new value for it to point to.
+//
+// To decode property list values into an interface value, Unmarshal decodes the property list into the concrete value contained
+// in the interface value. If the interface value is nil, Unmarshal stores one of the following in the interface value:
+//
+// string, bool, uint64, float64
+// plist.UID for "CoreFoundation Keyed Archiver UIDs" (convertible to uint64)
+// []byte, for plist data
+// []interface{}, for plist arrays
+// map[string]interface{}, for plist dictionaries
+//
+// If a property list value is not appropriate for a given value type, Unmarshal aborts immediately and returns an error.
+//
+// As Go does not support 128-bit types, and we don't want to pretend we're giving the user integer types (as opposed to
+// secretly passing them structs), Unmarshal will drop the high 64 bits of any 128-bit integers encoded in binary property lists.
+// (This is important because CoreFoundation serializes some large 64-bit values as 128-bit values with an empty high half.)
+//
+// When Unmarshal encounters an OpenStep property list, it will enter a relaxed parsing mode: OpenStep property lists can only store
+// plain old data as strings, so we will attempt to recover integer, floating-point, boolean and date values wherever they are necessary.
+// (for example, if Unmarshal attempts to unmarshal an OpenStep property list into a time.Time, it will try to parse the string it
+// receives as a time.)
+//
+// Unmarshal returns the detected property list format and an error, if any.
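+//
+// A minimal sketch of typical use (data is assumed to hold a document in any
+// supported format):
+//
+//	var v map[string]interface{}
+//	format, err := plist.Unmarshal(data, &v)
+//	name := plist.FormatNames[format] // e.g. "XML" or "Binary"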
+func Unmarshal(data []byte, v interface{}) (format int, err error) {
+ r := bytes.NewReader(data)
+ dec := NewDecoder(r)
+ err = dec.Decode(v)
+ format = dec.Format
+ return
+}
diff --git a/vendor/howett.net/plist/doc.go b/vendor/howett.net/plist/doc.go
new file mode 100644
index 0000000000..457e60b60b
--- /dev/null
+++ b/vendor/howett.net/plist/doc.go
@@ -0,0 +1,5 @@
+// Package plist implements encoding and decoding of Apple's "property list" format.
+// Property lists come in three sorts: plain text (GNUStep and OpenStep), XML and binary.
+// plist supports all of them.
+// The mapping between property list and Go objects is described in the documentation for the Marshal and Unmarshal functions.
+package plist
diff --git a/vendor/howett.net/plist/encode.go b/vendor/howett.net/plist/encode.go
new file mode 100644
index 0000000000..f81309b583
--- /dev/null
+++ b/vendor/howett.net/plist/encode.go
@@ -0,0 +1,126 @@
+package plist
+
+import (
+ "bytes"
+ "errors"
+ "io"
+ "reflect"
+ "runtime"
+)
+
+type generator interface {
+ generateDocument(cfValue)
+ Indent(string)
+}
+
+// An Encoder writes a property list to an output stream.
+type Encoder struct {
+ writer io.Writer
+ format int
+
+ indent string
+}
+
+// Encode writes the property list encoding of v to the stream.
+func (p *Encoder) Encode(v interface{}) (err error) {
+ defer func() {
+ if r := recover(); r != nil {
+ if _, ok := r.(runtime.Error); ok {
+ panic(r)
+ }
+ err = r.(error)
+ }
+ }()
+
+ pval := p.marshal(reflect.ValueOf(v))
+ if pval == nil {
+ panic(errors.New("plist: no root element to encode"))
+ }
+
+ var g generator
+ switch p.format {
+ case XMLFormat:
+ g = newXMLPlistGenerator(p.writer)
+ case BinaryFormat, AutomaticFormat:
+ g = newBplistGenerator(p.writer)
+ case OpenStepFormat, GNUStepFormat:
+ g = newTextPlistGenerator(p.writer, p.format)
+ }
+ g.Indent(p.indent)
+ g.generateDocument(pval)
+ return
+}
+
+// Indent turns on pretty-printing for the XML and Text property list formats.
+// Each element begins on a new line and is preceded by one or more copies of indent according to its nesting depth.
+func (p *Encoder) Indent(indent string) {
+ p.indent = indent
+}
+
+// NewEncoder returns an Encoder that writes an XML property list to w.
+func NewEncoder(w io.Writer) *Encoder {
+ return NewEncoderForFormat(w, XMLFormat)
+}
+
+// NewEncoderForFormat returns an Encoder that writes a property list to w in the specified format.
+// Pass AutomaticFormat to allow the library to choose the best encoding (currently BinaryFormat).
+func NewEncoderForFormat(w io.Writer, format int) *Encoder {
+ return &Encoder{
+ writer: w,
+ format: format,
+ }
+}
+
+// NewBinaryEncoder returns an Encoder that writes a binary property list to w.
+func NewBinaryEncoder(w io.Writer) *Encoder {
+ return NewEncoderForFormat(w, BinaryFormat)
+}
+
+// Marshal returns the property list encoding of v in the specified format.
+//
+// Pass AutomaticFormat to allow the library to choose the best encoding (currently BinaryFormat).
+//
+// Marshal traverses the value v recursively.
+// Any nil values encountered, other than the root, will be silently discarded as
+// the property list format bears no representation for nil values.
+//
+// Strings, integers of varying size, floats and booleans are encoded unchanged.
+// Strings bearing non-ASCII runes will be encoded differently depending upon the property list format:
+// UTF-8 for XML property lists and UTF-16 for binary property lists.
+//
+// Slice and Array values are encoded as property list arrays, except for
+// []byte values, which are encoded as data.
+//
+// Map values encode as dictionaries. The map's key type must be string; there is no provision for encoding non-string dictionary keys.
+//
+// Struct values are encoded as dictionaries, with only exported fields being serialized. Struct field encoding may be influenced with the use of tags.
+// The tag format is:
+//
+// `plist:"[,flags...]"`
+//
+// The following flags are supported:
+//
+// omitempty Only include the field if it is not set to the zero value for its type.
+//
+// If the key is "-", the field is ignored.
+//
+// Anonymous struct fields are encoded as if their exported fields were exposed via the outer struct.
+//
+// Pointer values encode as the value pointed to.
+//
+// Channel, complex and function values cannot be encoded. Any attempt to do so causes Marshal to return an error.
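+//
+// For illustration, a struct using these tags might look like:
+//
+//	type Account struct {
+//		User  string `plist:"user"`
+//		Token string `plist:"-"`             // never serialized
+//		Age   int    `plist:"age,omitempty"` // omitted when zero
+//	}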
+func Marshal(v interface{}, format int) ([]byte, error) {
+ return MarshalIndent(v, format, "")
+}
+
+// MarshalIndent works like Marshal, but each property list element
+// begins on a new line and is preceded by one or more copies of indent according to its nesting depth.
+func MarshalIndent(v interface{}, format int, indent string) ([]byte, error) {
+ buf := &bytes.Buffer{}
+ enc := NewEncoderForFormat(buf, format)
+ enc.Indent(indent)
+ if err := enc.Encode(v); err != nil {
+ return nil, err
+ }
+ return buf.Bytes(), nil
+}
diff --git a/vendor/howett.net/plist/fuzz.go b/vendor/howett.net/plist/fuzz.go
new file mode 100644
index 0000000000..18a3b4b9e0
--- /dev/null
+++ b/vendor/howett.net/plist/fuzz.go
@@ -0,0 +1,17 @@
+// +build gofuzz
+
+package plist
+
+import (
+ "bytes"
+)
+
+func Fuzz(data []byte) int {
+ buf := bytes.NewReader(data)
+
+ var obj interface{}
+ if err := NewDecoder(buf).Decode(&obj); err != nil {
+ return 0
+ }
+ return 1
+}
diff --git a/vendor/howett.net/plist/marshal.go b/vendor/howett.net/plist/marshal.go
new file mode 100644
index 0000000000..e237d20af0
--- /dev/null
+++ b/vendor/howett.net/plist/marshal.go
@@ -0,0 +1,187 @@
+package plist
+
+import (
+ "encoding"
+ "reflect"
+ "time"
+)
+
+func isEmptyValue(v reflect.Value) bool {
+ switch v.Kind() {
+ case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
+ return v.Len() == 0
+ case reflect.Bool:
+ return !v.Bool()
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return v.Int() == 0
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return v.Uint() == 0
+ case reflect.Float32, reflect.Float64:
+ return v.Float() == 0
+ case reflect.Interface, reflect.Ptr:
+ return v.IsNil()
+ }
+ return false
+}
+
+var (
+ plistMarshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem()
+ textMarshalerType = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem()
+ timeType = reflect.TypeOf((*time.Time)(nil)).Elem()
+)
+
+func implementsInterface(val reflect.Value, interfaceType reflect.Type) (interface{}, bool) {
+ if val.CanInterface() && val.Type().Implements(interfaceType) {
+ return val.Interface(), true
+ }
+
+ if val.CanAddr() {
+ pv := val.Addr()
+ if pv.CanInterface() && pv.Type().Implements(interfaceType) {
+ return pv.Interface(), true
+ }
+ }
+ return nil, false
+}
+
+func (p *Encoder) marshalPlistInterface(marshalable Marshaler) cfValue {
+ value, err := marshalable.MarshalPlist()
+ if err != nil {
+ panic(err)
+ }
+ return p.marshal(reflect.ValueOf(value))
+}
+
+// marshalTextInterface marshals a TextMarshaler to a plist string.
+func (p *Encoder) marshalTextInterface(marshalable encoding.TextMarshaler) cfValue {
+ s, err := marshalable.MarshalText()
+ if err != nil {
+ panic(err)
+ }
+ return cfString(s)
+}
+
+// marshalStruct marshals a reflected struct value to a plist dictionary
+func (p *Encoder) marshalStruct(typ reflect.Type, val reflect.Value) cfValue {
+ tinfo, _ := getTypeInfo(typ)
+
+ dict := &cfDictionary{
+ keys: make([]string, 0, len(tinfo.fields)),
+ values: make([]cfValue, 0, len(tinfo.fields)),
+ }
+ for _, finfo := range tinfo.fields {
+ value := finfo.value(val)
+ if !value.IsValid() || finfo.omitEmpty && isEmptyValue(value) {
+ continue
+ }
+ dict.keys = append(dict.keys, finfo.name)
+ dict.values = append(dict.values, p.marshal(value))
+ }
+
+ return dict
+}
+
+func (p *Encoder) marshalTime(val reflect.Value) cfValue {
+ time := val.Interface().(time.Time)
+ return cfDate(time)
+}
+
+func (p *Encoder) marshal(val reflect.Value) cfValue {
+ if !val.IsValid() {
+ return nil
+ }
+
+ if receiver, can := implementsInterface(val, plistMarshalerType); can {
+ return p.marshalPlistInterface(receiver.(Marshaler))
+ }
+
+ // time.Time implements TextMarshaler, but we need to store it in RFC3339
+ if val.Type() == timeType {
+ return p.marshalTime(val)
+ }
+ if val.Kind() == reflect.Ptr || (val.Kind() == reflect.Interface && val.NumMethod() == 0) {
+ ival := val.Elem()
+ if ival.IsValid() && ival.Type() == timeType {
+ return p.marshalTime(ival)
+ }
+ }
+
+ // Check for text marshaler.
+ if receiver, can := implementsInterface(val, textMarshalerType); can {
+ return p.marshalTextInterface(receiver.(encoding.TextMarshaler))
+ }
+
+ // Descend into pointers or interfaces
+ if val.Kind() == reflect.Ptr || (val.Kind() == reflect.Interface && val.NumMethod() == 0) {
+ val = val.Elem()
+ }
+
+ // We got this far and still may have an invalid anything or nil ptr/interface
+ if !val.IsValid() || ((val.Kind() == reflect.Ptr || val.Kind() == reflect.Interface) && val.IsNil()) {
+ return nil
+ }
+
+ typ := val.Type()
+
+ if typ == uidType {
+ return cfUID(val.Uint())
+ }
+
+ if val.Kind() == reflect.Struct {
+ return p.marshalStruct(typ, val)
+ }
+
+ switch val.Kind() {
+ case reflect.String:
+ return cfString(val.String())
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return &cfNumber{signed: true, value: uint64(val.Int())}
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return &cfNumber{signed: false, value: val.Uint()}
+ case reflect.Float32:
+ return &cfReal{wide: false, value: val.Float()}
+ case reflect.Float64:
+ return &cfReal{wide: true, value: val.Float()}
+ case reflect.Bool:
+ return cfBoolean(val.Bool())
+ case reflect.Slice, reflect.Array:
+ if typ.Elem().Kind() == reflect.Uint8 {
+ bytes := []byte(nil)
+ if val.CanAddr() && val.Kind() == reflect.Slice {
+ // arrays may be addressable but do not support .Bytes
+ bytes = val.Bytes()
+ } else {
+ bytes = make([]byte, val.Len())
+ reflect.Copy(reflect.ValueOf(bytes), val)
+ }
+ return cfData(bytes)
+ } else {
+ values := make([]cfValue, val.Len())
+ for i, length := 0, val.Len(); i < length; i++ {
+ if subpval := p.marshal(val.Index(i)); subpval != nil {
+ values[i] = subpval
+ }
+ }
+ return &cfArray{values}
+ }
+ case reflect.Map:
+ if typ.Key().Kind() != reflect.String {
+ panic(&unknownTypeError{typ})
+ }
+
+ l := val.Len()
+ dict := &cfDictionary{
+ keys: make([]string, 0, l),
+ values: make([]cfValue, 0, l),
+ }
+ for _, keyv := range val.MapKeys() {
+ if subpval := p.marshal(val.MapIndex(keyv)); subpval != nil {
+ dict.keys = append(dict.keys, keyv.String())
+ dict.values = append(dict.values, subpval)
+ }
+ }
+ return dict
+ default:
+ panic(&unknownTypeError{typ})
+ }
+}
diff --git a/vendor/howett.net/plist/must.go b/vendor/howett.net/plist/must.go
new file mode 100644
index 0000000000..2c2523d973
--- /dev/null
+++ b/vendor/howett.net/plist/must.go
@@ -0,0 +1,50 @@
+package plist
+
+import (
+ "io"
+ "strconv"
+)
+
+type mustWriter struct {
+ io.Writer
+}
+
+func (w mustWriter) Write(p []byte) (int, error) {
+ n, err := w.Writer.Write(p)
+ if err != nil {
+ panic(err)
+ }
+ return n, nil
+}
+
+func mustParseInt(str string, base, bits int) int64 {
+ i, err := strconv.ParseInt(str, base, bits)
+ if err != nil {
+ panic(err)
+ }
+ return i
+}
+
+func mustParseUint(str string, base, bits int) uint64 {
+ i, err := strconv.ParseUint(str, base, bits)
+ if err != nil {
+ panic(err)
+ }
+ return i
+}
+
+func mustParseFloat(str string, bits int) float64 {
+ i, err := strconv.ParseFloat(str, bits)
+ if err != nil {
+ panic(err)
+ }
+ return i
+}
+
+func mustParseBool(str string) bool {
+ i, err := strconv.ParseBool(str)
+ if err != nil {
+ panic(err)
+ }
+ return i
+}
diff --git a/vendor/howett.net/plist/plist.go b/vendor/howett.net/plist/plist.go
new file mode 100644
index 0000000000..8883e1c769
--- /dev/null
+++ b/vendor/howett.net/plist/plist.go
@@ -0,0 +1,83 @@
+package plist
+
+import (
+ "reflect"
+)
+
+// Property list format constants
+const (
+ // Used by Decoder to represent an invalid property list.
+ InvalidFormat int = 0
+
+ // Used to indicate total abandon with regards to Encoder's output format.
+ AutomaticFormat = 0
+
+ XMLFormat = 1
+ BinaryFormat = 2
+ OpenStepFormat = 3
+ GNUStepFormat = 4
+)
+
+// FormatNames maps the property list format constants to human-readable names.
+var FormatNames = map[int]string{
+ InvalidFormat: "unknown/invalid",
+ XMLFormat: "XML",
+ BinaryFormat: "Binary",
+ OpenStepFormat: "OpenStep",
+ GNUStepFormat: "GNUStep",
+}
+
+type unknownTypeError struct {
+ typ reflect.Type
+}
+
+func (u *unknownTypeError) Error() string {
+ return "plist: can't marshal value of type " + u.typ.String()
+}
+
+type invalidPlistError struct {
+ format string
+ err error
+}
+
+func (e invalidPlistError) Error() string {
+ s := "plist: invalid " + e.format + " property list"
+ if e.err != nil {
+ s += ": " + e.err.Error()
+ }
+ return s
+}
+
+type plistParseError struct {
+ format string
+ err error
+}
+
+func (e plistParseError) Error() string {
+ s := "plist: error parsing " + e.format + " property list"
+ if e.err != nil {
+ s += ": " + e.err.Error()
+ }
+ return s
+}
+
+// A UID represents a unique object identifier. UIDs are serialized in a manner distinct from
+// that of integers.
+type UID uint64
+
+// Marshaler is the interface implemented by types that can marshal themselves into valid
+// property list objects. The returned value is marshaled in place of the original value
+// implementing Marshaler.
+//
+// If an error is returned by MarshalPlist, marshaling stops and the error is returned.
+type Marshaler interface {
+ MarshalPlist() (interface{}, error)
+}
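+
+// As an illustrative sketch (not part of this package), a client type could
+// implement Marshaler to serialize itself as a plain string:
+//
+//	type Version struct{ Major, Minor int }
+//
+//	func (v Version) MarshalPlist() (interface{}, error) {
+//		return fmt.Sprintf("%d.%d", v.Major, v.Minor), nil
+//	}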
+
+// Unmarshaler is the interface implemented by types that can unmarshal themselves from
+// property list objects. The UnmarshalPlist method receives a function that may
+// be called to unmarshal the original property list value into a field or variable.
+//
+// It is safe to call the unmarshal function more than once.
+type Unmarshaler interface {
+ UnmarshalPlist(unmarshal func(interface{}) error) error
+}
diff --git a/vendor/howett.net/plist/plist_types.go b/vendor/howett.net/plist/plist_types.go
new file mode 100644
index 0000000000..9836364473
--- /dev/null
+++ b/vendor/howett.net/plist/plist_types.go
@@ -0,0 +1,172 @@
+package plist
+
+import (
+ "hash/crc32"
+ "sort"
+ "time"
+ "strconv"
+)
+
+// magic value used in the non-binary encoding of UIDs
+// (stored as a dictionary mapping CF$UID->integer)
+const cfUIDMagic = "CF$UID"
+
+type cfValue interface {
+ typeName() string
+ hash() interface{}
+}
+
+type cfDictionary struct {
+ keys sort.StringSlice
+ values []cfValue
+}
+
+func (*cfDictionary) typeName() string {
+ return "dictionary"
+}
+
+func (p *cfDictionary) hash() interface{} {
+ return p
+}
+
+func (p *cfDictionary) Len() int {
+ return len(p.keys)
+}
+
+func (p *cfDictionary) Less(i, j int) bool {
+ return p.keys.Less(i, j)
+}
+
+func (p *cfDictionary) Swap(i, j int) {
+ p.keys.Swap(i, j)
+ p.values[i], p.values[j] = p.values[j], p.values[i]
+}
+
+func (p *cfDictionary) sort() {
+ sort.Sort(p)
+}
+
+func (p *cfDictionary) maybeUID(lax bool) cfValue {
+ if len(p.keys) == 1 && p.keys[0] == "CF$UID" && len(p.values) == 1 {
+ pval := p.values[0]
+ if integer, ok := pval.(*cfNumber); ok {
+ return cfUID(integer.value)
+ }
+ // Openstep only has cfString. Act like the unmarshaller a bit.
+ if lax {
+ if str, ok := pval.(cfString); ok {
+ if i, err := strconv.ParseUint(string(str), 10, 64); err == nil {
+ return cfUID(i)
+ }
+ }
+ }
+ }
+ return p
+}
+
+type cfArray struct {
+ values []cfValue
+}
+
+func (*cfArray) typeName() string {
+ return "array"
+}
+
+func (p *cfArray) hash() interface{} {
+ return p
+}
+
+type cfString string
+
+func (cfString) typeName() string {
+ return "string"
+}
+
+func (p cfString) hash() interface{} {
+ return string(p)
+}
+
+type cfNumber struct {
+ signed bool
+ value uint64
+}
+
+func (*cfNumber) typeName() string {
+ return "integer"
+}
+
+func (p *cfNumber) hash() interface{} {
+ if p.signed {
+ return int64(p.value)
+ }
+ return p.value
+}
+
+type cfReal struct {
+ wide bool
+ value float64
+}
+
+func (cfReal) typeName() string {
+ return "real"
+}
+
+func (p *cfReal) hash() interface{} {
+ if p.wide {
+ return p.value
+ }
+ return float32(p.value)
+}
+
+type cfBoolean bool
+
+func (cfBoolean) typeName() string {
+ return "boolean"
+}
+
+func (p cfBoolean) hash() interface{} {
+ return bool(p)
+}
+
+type cfUID UID
+
+func (cfUID) typeName() string {
+ return "UID"
+}
+
+func (p cfUID) hash() interface{} {
+ return p
+}
+
+func (p cfUID) toDict() *cfDictionary {
+ return &cfDictionary{
+ keys: []string{cfUIDMagic},
+ values: []cfValue{&cfNumber{
+ signed: false,
+ value: uint64(p),
+ }},
+ }
+}
+
+type cfData []byte
+
+func (cfData) typeName() string {
+ return "data"
+}
+
+func (p cfData) hash() interface{} {
+ // Data are uniqued by their checksums.
+ // TODO: Look at calculating this only once and storing it somewhere;
+ // crc32 is fairly quick, however.
+ return crc32.ChecksumIEEE([]byte(p))
+}
+
+type cfDate time.Time
+
+func (cfDate) typeName() string {
+ return "date"
+}
+
+func (p cfDate) hash() interface{} {
+ return time.Time(p)
+}
diff --git a/vendor/howett.net/plist/text_generator.go b/vendor/howett.net/plist/text_generator.go
new file mode 100644
index 0000000000..d71f02bbc8
--- /dev/null
+++ b/vendor/howett.net/plist/text_generator.go
@@ -0,0 +1,228 @@
+package plist
+
+import (
+ "encoding/hex"
+ "io"
+ "strconv"
+ "time"
+)
+
+type textPlistGenerator struct {
+ writer io.Writer
+ format int
+
+ quotableTable *characterSet
+
+ indent string
+ depth int
+
+ dictKvDelimiter, dictEntryDelimiter, arrayDelimiter []byte
+}
+
+var (
+ textPlistTimeLayout = "2006-01-02 15:04:05 -0700"
+ padding = "0000"
+)
+
+func (p *textPlistGenerator) generateDocument(pval cfValue) {
+ p.writePlistValue(pval)
+}
+
+func (p *textPlistGenerator) plistQuotedString(str string) string {
+ if str == "" {
+ return `""`
+ }
+ s := ""
+ quot := false
+ for _, r := range str {
+ if r > 0xFF {
+ quot = true
+ s += `\U`
+ us := strconv.FormatInt(int64(r), 16)
+ s += padding[len(us):]
+ s += us
+ } else if r > 0x7F {
+ quot = true
+ s += `\`
+ us := strconv.FormatInt(int64(r), 8)
+ s += padding[1+len(us):]
+ s += us
+ } else {
+ c := uint8(r)
+ if p.quotableTable.ContainsByte(c) {
+ quot = true
+ }
+
+ switch c {
+ case '\a':
+ s += `\a`
+ case '\b':
+ s += `\b`
+ case '\v':
+ s += `\v`
+ case '\f':
+ s += `\f`
+ case '\\':
+ s += `\\`
+ case '"':
+ s += `\"`
+ case '\t', '\r', '\n':
+ fallthrough
+ default:
+ s += string(c)
+ }
+ }
+ }
+ if quot {
+ s = `"` + s + `"`
+ }
+ return s
+}
+
+func (p *textPlistGenerator) deltaIndent(depthDelta int) {
+ if depthDelta < 0 {
+ p.depth--
+ } else if depthDelta > 0 {
+ p.depth++
+ }
+}
+
+func (p *textPlistGenerator) writeIndent() {
+ if len(p.indent) == 0 {
+ return
+ }
+ if len(p.indent) > 0 {
+ p.writer.Write([]byte("\n"))
+ for i := 0; i < p.depth; i++ {
+ io.WriteString(p.writer, p.indent)
+ }
+ }
+}
+
+func (p *textPlistGenerator) writePlistValue(pval cfValue) {
+ if pval == nil {
+ return
+ }
+
+ switch pval := pval.(type) {
+ case *cfDictionary:
+ pval.sort()
+ p.writer.Write([]byte(`{`))
+ p.deltaIndent(1)
+ for i, k := range pval.keys {
+ p.writeIndent()
+ io.WriteString(p.writer, p.plistQuotedString(k))
+ p.writer.Write(p.dictKvDelimiter)
+ p.writePlistValue(pval.values[i])
+ p.writer.Write(p.dictEntryDelimiter)
+ }
+ p.deltaIndent(-1)
+ p.writeIndent()
+ p.writer.Write([]byte(`}`))
+ case *cfArray:
+ p.writer.Write([]byte(`(`))
+ p.deltaIndent(1)
+ for _, v := range pval.values {
+ p.writeIndent()
+ p.writePlistValue(v)
+ p.writer.Write(p.arrayDelimiter)
+ }
+ p.deltaIndent(-1)
+ p.writeIndent()
+ p.writer.Write([]byte(`)`))
+ case cfString:
+ io.WriteString(p.writer, p.plistQuotedString(string(pval)))
+ case *cfNumber:
+ if p.format == GNUStepFormat {
+ p.writer.Write([]byte(`<*I`))
+ }
+ if pval.signed {
+ io.WriteString(p.writer, strconv.FormatInt(int64(pval.value), 10))
+ } else {
+ io.WriteString(p.writer, strconv.FormatUint(pval.value, 10))
+ }
+ if p.format == GNUStepFormat {
+ p.writer.Write([]byte(`>`))
+ }
+ case *cfReal:
+ if p.format == GNUStepFormat {
+ p.writer.Write([]byte(`<*R`))
+ }
+ // GNUstep does not differentiate between 32/64-bit floats.
+ io.WriteString(p.writer, strconv.FormatFloat(pval.value, 'g', -1, 64))
+ if p.format == GNUStepFormat {
+ p.writer.Write([]byte(`>`))
+ }
+ case cfBoolean:
+ if p.format == GNUStepFormat {
+ if pval {
+ p.writer.Write([]byte(`<*BY>`))
+ } else {
+ p.writer.Write([]byte(`<*BN>`))
+ }
+ } else {
+ if pval {
+ p.writer.Write([]byte(`1`))
+ } else {
+ p.writer.Write([]byte(`0`))
+ }
+ }
+ case cfData:
+ var hexencoded [9]byte
+ var l int
+ var asc = 9
+ hexencoded[8] = ' '
+
+ p.writer.Write([]byte(`<`))
+ b := []byte(pval)
+ for i := 0; i < len(b); i += 4 {
+ l = i + 4
+ if l >= len(b) {
+ l = len(b)
+ // We no longer need the space - or the rest of the buffer.
+ // (we used >= above to get this part without another conditional :P)
+ asc = (l - i) * 2
+ }
+ // Fill the buffer (only up to 8 characters, to preserve the space we implicitly include
+ // at the end of every encode)
+ hex.Encode(hexencoded[:8], b[i:l])
+ io.WriteString(p.writer, string(hexencoded[:asc]))
+ }
+ p.writer.Write([]byte(`>`))
+ case cfDate:
+ if p.format == GNUStepFormat {
+ p.writer.Write([]byte(`<*D`))
+ io.WriteString(p.writer, time.Time(pval).In(time.UTC).Format(textPlistTimeLayout))
+ p.writer.Write([]byte(`>`))
+ } else {
+ io.WriteString(p.writer, p.plistQuotedString(time.Time(pval).In(time.UTC).Format(textPlistTimeLayout)))
+ }
+ case cfUID:
+ p.writePlistValue(pval.toDict())
+ }
+}
+
+func (p *textPlistGenerator) Indent(i string) {
+ p.indent = i
+ if i == "" {
+ p.dictKvDelimiter = []byte(`=`)
+ } else {
+ // For pretty-printing
+ p.dictKvDelimiter = []byte(` = `)
+ }
+}
+
+func newTextPlistGenerator(w io.Writer, format int) *textPlistGenerator {
+ table := &osQuotable
+ if format == GNUStepFormat {
+ table = &gsQuotable
+ }
+ return &textPlistGenerator{
+ writer: mustWriter{w},
+ format: format,
+ quotableTable: table,
+ dictKvDelimiter: []byte(`=`),
+ arrayDelimiter: []byte(`,`),
+ dictEntryDelimiter: []byte(`;`),
+ }
+}
diff --git a/vendor/howett.net/plist/text_parser.go b/vendor/howett.net/plist/text_parser.go
new file mode 100644
index 0000000000..c60423ff8e
--- /dev/null
+++ b/vendor/howett.net/plist/text_parser.go
@@ -0,0 +1,580 @@
+// Parser for text plist formats.
+// @see https://github.com/apple/swift-corelibs-foundation/blob/master/CoreFoundation/Parsing.subproj/CFOldStylePList.c
+// @see https://github.com/gnustep/libs-base/blob/master/Source/NSPropertyList.m
+// This parser also handles strings files.
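+// A .strings document is a sequence of `"key" = "value";` pairs without the
+// surrounding braces; parseDocument below falls back to parsing the whole
+// input as a dictionary (parseDictionary(true)) when it sees such input.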
+
+package plist
+
+import (
+ "encoding/base64"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "runtime"
+ "strings"
+ "time"
+ "unicode/utf16"
+ "unicode/utf8"
+)
+
+type textPlistParser struct {
+ reader io.Reader
+ format int
+
+ input string
+ start int
+ pos int
+ width int
+}
+
+func convertU16(buffer []byte, bo binary.ByteOrder) (string, error) {
+ if len(buffer)%2 != 0 {
+ return "", errors.New("truncated utf16")
+ }
+
+ tmp := make([]uint16, len(buffer)/2)
+ for i := 0; i < len(buffer); i += 2 {
+ tmp[i/2] = bo.Uint16(buffer[i : i+2])
+ }
+ return string(utf16.Decode(tmp)), nil
+}
+
+func guessEncodingAndConvert(buffer []byte) (string, error) {
+ if len(buffer) >= 3 && buffer[0] == 0xEF && buffer[1] == 0xBB && buffer[2] == 0xBF {
+ // UTF-8 BOM
+ return zeroCopy8BitString(buffer, 3, len(buffer)-3), nil
+ } else if len(buffer) >= 2 {
+ // UTF-16 guesses
+
+ switch {
+ // stream is big-endian (BOM is FE FF or head is 00 XX)
+ case (buffer[0] == 0xFE && buffer[1] == 0xFF):
+ return convertU16(buffer[2:], binary.BigEndian)
+ case (buffer[0] == 0 && buffer[1] != 0):
+ return convertU16(buffer, binary.BigEndian)
+
+ // stream is little-endian (BOM is FF FE or head is XX 00)
+ case (buffer[0] == 0xFF && buffer[1] == 0xFE):
+ return convertU16(buffer[2:], binary.LittleEndian)
+ case (buffer[0] != 0 && buffer[1] == 0):
+ return convertU16(buffer, binary.LittleEndian)
+ }
+ }
+
+ // fallback: assume ASCII (not great!)
+ return zeroCopy8BitString(buffer, 0, len(buffer)), nil
+}
+
+func (p *textPlistParser) parseDocument() (pval cfValue, parseError error) {
+ defer func() {
+ if r := recover(); r != nil {
+ if _, ok := r.(runtime.Error); ok {
+ panic(r)
+ }
+ // Wrap all non-invalid-plist errors.
+ parseError = plistParseError{"text", r.(error)}
+ }
+ }()
+
+ buffer, err := ioutil.ReadAll(p.reader)
+ if err != nil {
+ panic(err)
+ }
+
+ p.input, err = guessEncodingAndConvert(buffer)
+ if err != nil {
+ panic(err)
+ }
+
+ val := p.parsePlistValue()
+
+ p.skipWhitespaceAndComments()
+ if p.peek() != eof {
+ if _, ok := val.(cfString); !ok {
+ p.error("garbage after end of document")
+ }
+
+ // Try parsing as .strings.
+ // See -[NSDictionary propertyListFromStringsFileFormat:].
+ p.start = 0
+ p.pos = 0
+ val = p.parseDictionary(true)
+ }
+
+ pval = val
+
+ return
+}
+
+const eof rune = -1
+
+func (p *textPlistParser) error(e string, args ...interface{}) {
+ line := strings.Count(p.input[:p.pos], "\n")
+ char := p.pos - strings.LastIndex(p.input[:p.pos], "\n") - 1
+ panic(fmt.Errorf("%s at line %d character %d", fmt.Sprintf(e, args...), line, char))
+}
+
+func (p *textPlistParser) next() rune {
+ if int(p.pos) >= len(p.input) {
+ p.width = 0
+ return eof
+ }
+ r, w := utf8.DecodeRuneInString(p.input[p.pos:])
+ p.width = w
+ p.pos += p.width
+ return r
+}
+
+func (p *textPlistParser) backup() {
+ p.pos -= p.width
+}
+
+func (p *textPlistParser) peek() rune {
+ r := p.next()
+ p.backup()
+ return r
+}
+
+func (p *textPlistParser) emit() string {
+ s := p.input[p.start:p.pos]
+ p.start = p.pos
+ return s
+}
+
+func (p *textPlistParser) ignore() {
+ p.start = p.pos
+}
+
+func (p *textPlistParser) empty() bool {
+ return p.start == p.pos
+}
+
+func (p *textPlistParser) scanUntil(ch rune) {
+ if x := strings.IndexRune(p.input[p.pos:], ch); x >= 0 {
+ p.pos += x
+ return
+ }
+ p.pos = len(p.input)
+}
+
+func (p *textPlistParser) scanUntilAny(chs string) {
+ if x := strings.IndexAny(p.input[p.pos:], chs); x >= 0 {
+ p.pos += x
+ return
+ }
+ p.pos = len(p.input)
+}
+
+func (p *textPlistParser) scanCharactersInSet(ch *characterSet) {
+ for ch.Contains(p.next()) {
+ }
+ p.backup()
+}
+
+func (p *textPlistParser) scanCharactersNotInSet(ch *characterSet) {
+ var r rune
+ for {
+ r = p.next()
+ if r == eof || ch.Contains(r) {
+ break
+ }
+ }
+ p.backup()
+}
+
+func (p *textPlistParser) skipWhitespaceAndComments() {
+ for {
+ p.scanCharactersInSet(&whitespace)
+ if strings.HasPrefix(p.input[p.pos:], "//") {
+ p.scanCharactersNotInSet(&newlineCharacterSet)
+ } else if strings.HasPrefix(p.input[p.pos:], "/*") {
+ if x := strings.Index(p.input[p.pos:], "*/"); x >= 0 {
+ p.pos += x + 2 // skip the */ as well
+ continue // consume more whitespace
+ } else {
+ p.error("unexpected eof in block comment")
+ }
+ } else {
+ break
+ }
+ }
+ p.ignore()
+}
+
+func (p *textPlistParser) parseOctalDigits(max int) uint64 {
+ var val uint64
+
+ for i := 0; i < max; i++ {
+ r := p.next()
+
+ if r >= '0' && r <= '7' {
+ val <<= 3
+ val |= uint64((r - '0'))
+ } else {
+ p.backup()
+ break
+ }
+ }
+ return val
+}
+
+func (p *textPlistParser) parseHexDigits(max int) uint64 {
+ var val uint64
+
+ for i := 0; i < max; i++ {
+ r := p.next()
+
+ if r >= 'a' && r <= 'f' {
+ val <<= 4
+ val |= 10 + uint64((r - 'a'))
+ } else if r >= 'A' && r <= 'F' {
+ val <<= 4
+ val |= 10 + uint64((r - 'A'))
+ } else if r >= '0' && r <= '9' {
+ val <<= 4
+ val |= uint64((r - '0'))
+ } else {
+ p.backup()
+ break
+ }
+ }
+ return val
+}
+
+// the \ has already been consumed
+func (p *textPlistParser) parseEscape() string {
+ var s string
+ switch p.next() {
+ case 'a':
+ s = "\a"
+ case 'b':
+ s = "\b"
+ case 'v':
+ s = "\v"
+ case 'f':
+ s = "\f"
+ case 't':
+ s = "\t"
+ case 'r':
+ s = "\r"
+ case 'n':
+ s = "\n"
+ case '\\':
+ s = `\`
+ case '"':
+ s = `"`
+ case 'x': // This is our extension.
+ s = string(rune(p.parseHexDigits(2)))
+ case 'u', 'U': // 'u' is a GNUstep extension.
+ s = string(rune(p.parseHexDigits(4)))
+ case '0', '1', '2', '3', '4', '5', '6', '7':
+ p.backup() // we've already consumed one of the digits
+ s = string(rune(p.parseOctalDigits(3)))
+ default:
+ p.backup() // everything else should be accepted
+ }
+ p.ignore() // skip the entire escape sequence
+ return s
+}
+
+// the " has already been consumed
+func (p *textPlistParser) parseQuotedString() cfString {
+ p.ignore() // ignore the "
+
+ slowPath := false
+ s := ""
+
+ for {
+ p.scanUntilAny(`"\`)
+ switch p.peek() {
+ case eof:
+ p.error("unexpected eof in quoted string")
+ case '"':
+ section := p.emit()
+ p.pos++ // skip "
+ if !slowPath {
+ return cfString(section)
+ } else {
+ s += section
+ return cfString(s)
+ }
+ case '\\':
+ slowPath = true
+ s += p.emit()
+ p.next() // consume \
+ s += p.parseEscape()
+ }
+ }
+}
+
+func (p *textPlistParser) parseUnquotedString() cfString {
+ p.scanCharactersNotInSet(&gsQuotable)
+ s := p.emit()
+ if s == "" {
+ p.error("invalid unquoted string (found an unquoted character that should be quoted?)")
+ }
+
+ return cfString(s)
+}
+
+// the { has already been consumed
+func (p *textPlistParser) parseDictionary(ignoreEof bool) cfValue {
+ //p.ignore() // ignore the {
+ var keypv cfValue
+ keys := make([]string, 0, 32)
+ values := make([]cfValue, 0, 32)
+outer:
+ for {
+ p.skipWhitespaceAndComments()
+
+ switch p.next() {
+ case eof:
+ if !ignoreEof {
+ p.error("unexpected eof in dictionary")
+ }
+ fallthrough
+ case '}':
+ break outer
+ case '"':
+ keypv = p.parseQuotedString()
+ default:
+ p.backup()
+ keypv = p.parseUnquotedString()
+ }
+
+ // INVARIANT: key can't be nil; parseQuoted and parseUnquoted
+ // will panic out before they return nil.
+
+ p.skipWhitespaceAndComments()
+
+ var val cfValue
+ n := p.next()
+ if n == ';' {
+ // This is supposed to be .strings-specific.
+ // GNUstep parses this as an empty string.
+ // Apple copies the key like we do.
+ val = keypv
+ } else if n == '=' {
+ // whitespace is consumed within
+ val = p.parsePlistValue()
+
+ p.skipWhitespaceAndComments()
+
+ if p.next() != ';' {
+ p.error("missing ; in dictionary")
+ }
+ } else {
+ p.error("missing = in dictionary")
+ }
+
+ keys = append(keys, string(keypv.(cfString)))
+ values = append(values, val)
+ }
+
+ dict := &cfDictionary{keys: keys, values: values}
+ return dict.maybeUID(p.format == OpenStepFormat)
+}
+
+// the ( has already been consumed
+func (p *textPlistParser) parseArray() *cfArray {
+ //p.ignore() // ignore the (
+ values := make([]cfValue, 0, 32)
+outer:
+ for {
+ p.skipWhitespaceAndComments()
+
+ switch p.next() {
+ case eof:
+ p.error("unexpected eof in array")
+ case ')':
+ break outer // done here
+ case ',':
+ continue // restart; ,) is valid and we don't want to blow it
+ default:
+ p.backup()
+ }
+
+ pval := p.parsePlistValue() // whitespace is consumed within
+ if str, ok := pval.(cfString); ok && string(str) == "" {
+ // Empty strings in arrays are apparently skipped?
+ // TODO: Figure out why this was implemented.
+ continue
+ }
+ values = append(values, pval)
+ }
+ return &cfArray{values}
+}
+
+// the <* have already been consumed
+func (p *textPlistParser) parseGNUStepValue() cfValue {
+ typ := p.next()
+
+ if typ == '>' || typ == eof { // <*>, <*EOF
+ p.error("invalid GNUStep extended value")
+ }
+
+ if typ != 'I' && typ != 'R' && typ != 'B' && typ != 'D' {
+ // early out: no need to collect the value if we'll fail to understand it
+ p.error("unknown GNUStep extended value type `" + string(typ) + "'")
+ }
+
+ if p.peek() == '"' { // <*x"
+ p.next()
+ }
+
+ p.ignore()
+ p.scanUntil('>')
+
+ if p.peek() == eof { // <*xEOF or <*x"EOF
+ p.error("unterminated GNUStep extended value")
+ }
+
+ if p.empty() { // <*x>, <*x"">
+ p.error("empty GNUStep extended value")
+ }
+
+ v := p.emit()
+ p.next() // consume the >
+
+ if v[len(v)-1] == '"' {
+ // GNUStep tolerates malformed quoted values, as in <*I5"> and <*I"5>
+ // It purportedly does so by stripping the trailing quote
+ v = v[:len(v)-1]
+ }
+
+ switch typ {
+ case 'I':
+ if v[0] == '-' {
+ n := mustParseInt(v, 10, 64)
+ return &cfNumber{signed: true, value: uint64(n)}
+ } else {
+ n := mustParseUint(v, 10, 64)
+ return &cfNumber{signed: false, value: n}
+ }
+ case 'R':
+ n := mustParseFloat(v, 64)
+ return &cfReal{wide: true, value: n} // TODO(DH) 32/64
+ case 'B':
+ b := v[0] == 'Y'
+ return cfBoolean(b)
+ case 'D':
+ t, err := time.Parse(textPlistTimeLayout, v)
+ if err != nil {
+ p.error(err.Error())
+ }
+
+ return cfDate(t.In(time.UTC))
+ }
+ // We should never get here; we checked the type above
+ return nil
+}
+
+// the <[ have already been consumed
+func (p *textPlistParser) parseGNUStepBase64() cfData {
+ p.ignore()
+ p.scanUntil(']')
+ v := p.emit()
+
+ if p.next() != ']' {
+ p.error("invalid GNUStep base64 data (expected ']')")
+ }
+
+ if p.next() != '>' {
+ p.error("invalid GNUStep base64 data (expected '>')")
+ }
+
+ // Emulate NSDataBase64DecodingIgnoreUnknownCharacters
+ filtered := strings.Map(base64ValidChars.Map, v)
+ data, err := base64.StdEncoding.DecodeString(filtered)
+ if err != nil {
+ p.error("invalid GNUStep base64 data: " + err.Error())
+ }
+ return cfData(data)
+}
+
+// The < has already been consumed
+func (p *textPlistParser) parseHexData() cfData {
+ buf := make([]byte, 256)
+ i := 0
+ c := 0
+
+ for {
+ r := p.next()
+ switch r {
+ case eof:
+ p.error("unexpected eof in data")
+ case '>':
+ if c&1 == 1 {
+ p.error("uneven number of hex digits in data")
+ }
+ p.ignore()
+ return cfData(buf[:i])
+ // Apple and GNUstep both want these in pairs. We are a bit more lax.
+ // GS accepts comments too, but that seems like a lot of work.
+ case ' ', '\t', '\n', '\r', '\u2028', '\u2029':
+ continue
+ }
+
+ buf[i] <<= 4
+ if r >= 'a' && r <= 'f' {
+ buf[i] |= 10 + byte((r - 'a'))
+ } else if r >= 'A' && r <= 'F' {
+ buf[i] |= 10 + byte((r - 'A'))
+ } else if r >= '0' && r <= '9' {
+ buf[i] |= byte((r - '0'))
+ } else {
+ p.error("unexpected hex digit `%c'", r)
+ }
+
+ c++
+ if c&1 == 0 {
+ i++
+ if i >= len(buf) {
+ realloc := make([]byte, len(buf)*2)
+ copy(realloc, buf)
+ buf = realloc
+ }
+ }
+ }
+}
+
+func (p *textPlistParser) parsePlistValue() cfValue {
+ for {
+ p.skipWhitespaceAndComments()
+
+ switch p.next() {
+ case eof:
+ return &cfDictionary{}
+ case '<':
+ switch p.next() {
+ case '*':
+ p.format = GNUStepFormat
+ return p.parseGNUStepValue()
+ case '[':
+ p.format = GNUStepFormat
+ return p.parseGNUStepBase64()
+ default:
+ p.backup()
+ return p.parseHexData()
+ }
+ case '"':
+ return p.parseQuotedString()
+ case '{':
+ return p.parseDictionary(false)
+ case '(':
+ return p.parseArray()
+ default:
+ p.backup()
+ return p.parseUnquotedString()
+ }
+ }
+}
+
+func newTextPlistParser(r io.Reader) *textPlistParser {
+ return &textPlistParser{
+ reader: r,
+ format: OpenStepFormat,
+ }
+}
diff --git a/vendor/howett.net/plist/text_tables.go b/vendor/howett.net/plist/text_tables.go
new file mode 100644
index 0000000000..2bdd7ba9f5
--- /dev/null
+++ b/vendor/howett.net/plist/text_tables.go
@@ -0,0 +1,61 @@
+package plist
+
+type characterSet [4]uint64
+
+func (s *characterSet) Map(ch rune) rune {
+ if s.Contains(ch) {
+ return ch
+ } else {
+ return -1
+ }
+}
+
+func (s *characterSet) Contains(ch rune) bool {
+ return ch >= 0 && ch <= 255 && s.ContainsByte(byte(ch))
+}
+
+func (s *characterSet) ContainsByte(ch byte) bool {
+ return (s[ch/64]&(1<<(ch%64)) > 0)
+}
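+
+// Each characterSet packs 256 bits: character ch maps to bit ch%64 of word
+// ch/64, so e.g. 'A' (0x41) is bit 1 of word 1.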
+
+// Bitmap of characters that must be inside a quoted string
+// when written to an old-style property list
+// Low bits represent lower characters, and each uint64 represents 64 characters.
+var gsQuotable = characterSet{
+ 0x78001385ffffffff,
+ 0xa800000138000000,
+ 0xffffffffffffffff,
+ 0xffffffffffffffff,
+}
+
+// 7f instead of 3f in the top line: CFOldStylePlist.c says . is valid, but they quote it.
+// ef instead of 6f in the top line: ' will be quoted
+var osQuotable = characterSet{
+ 0xf4007fefffffffff,
+ 0xf8000001f8000001,
+ 0xffffffffffffffff,
+ 0xffffffffffffffff,
+}
+
+var whitespace = characterSet{
+ 0x0000000100003f00,
+ 0x0000000000000000,
+ 0x0000000000000000,
+ 0x0000000000000000,
+}
+
+var newlineCharacterSet = characterSet{
+ 0x0000000000002400,
+ 0x0000000000000000,
+ 0x0000000000000000,
+ 0x0000000000000000,
+}
+
+// Bitmap of characters that are valid in base64-encoded strings.
+// Used to filter out non-b64 characters to emulate NSDataBase64DecodingIgnoreUnknownCharacters
+var base64ValidChars = characterSet{
+ 0x23ff880000000000,
+ 0x07fffffe07fffffe,
+ 0x0000000000000000,
+ 0x0000000000000000,
+}
diff --git a/vendor/howett.net/plist/typeinfo.go b/vendor/howett.net/plist/typeinfo.go
new file mode 100644
index 0000000000..f0b920f8a8
--- /dev/null
+++ b/vendor/howett.net/plist/typeinfo.go
@@ -0,0 +1,170 @@
+package plist
+
+import (
+ "reflect"
+ "strings"
+ "sync"
+)
+
+// typeInfo holds details for the plist representation of a type.
+type typeInfo struct {
+ fields []fieldInfo
+}
+
+// fieldInfo holds details for the plist representation of a single field.
+type fieldInfo struct {
+ idx []int
+ name string
+ omitEmpty bool
+}
+
+var tinfoMap = make(map[reflect.Type]*typeInfo)
+var tinfoLock sync.RWMutex
+
+// getTypeInfo returns the typeInfo structure with details necessary
+// for marshalling and unmarshalling typ.
+func getTypeInfo(typ reflect.Type) (*typeInfo, error) {
+ tinfoLock.RLock()
+ tinfo, ok := tinfoMap[typ]
+ tinfoLock.RUnlock()
+ if ok {
+ return tinfo, nil
+ }
+ tinfo = &typeInfo{}
+ if typ.Kind() == reflect.Struct {
+ n := typ.NumField()
+ for i := 0; i < n; i++ {
+ f := typ.Field(i)
+ if f.PkgPath != "" || f.Tag.Get("plist") == "-" {
+ continue // Private field
+ }
+
+ // For embedded structs, embed its fields.
+ if f.Anonymous {
+ t := f.Type
+ if t.Kind() == reflect.Ptr {
+ t = t.Elem()
+ }
+ if t.Kind() == reflect.Struct {
+ inner, err := getTypeInfo(t)
+ if err != nil {
+ return nil, err
+ }
+ for _, finfo := range inner.fields {
+ finfo.idx = append([]int{i}, finfo.idx...)
+ if err := addFieldInfo(typ, tinfo, &finfo); err != nil {
+ return nil, err
+ }
+ }
+ continue
+ }
+ }
+
+ finfo, err := structFieldInfo(typ, &f)
+ if err != nil {
+ return nil, err
+ }
+
+ // Add the field if it doesn't conflict with other fields.
+ if err := addFieldInfo(typ, tinfo, finfo); err != nil {
+ return nil, err
+ }
+ }
+ }
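+ // Concurrent callers may race to compute the same typeInfo; the duplicate
+ // store below is harmless because both results are identical.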
+ tinfoLock.Lock()
+ tinfoMap[typ] = tinfo
+ tinfoLock.Unlock()
+ return tinfo, nil
+}
+
+// structFieldInfo builds and returns a fieldInfo for f.
+func structFieldInfo(typ reflect.Type, f *reflect.StructField) (*fieldInfo, error) {
+ finfo := &fieldInfo{idx: f.Index}
+
+ // Read the field's plist struct tag.
+ tag := f.Tag.Get("plist")
+
+ // Parse flags.
+ tokens := strings.Split(tag, ",")
+ tag = tokens[0]
+ if len(tokens) > 1 {
+ for _, flag := range tokens[1:] {
+ switch flag {
+ case "omitempty":
+ finfo.omitEmpty = true
+ }
+ }
+ }
+
+ if tag == "" {
+ // If the name part of the tag is completely empty,
+ // use the field name
+ finfo.name = f.Name
+ return finfo, nil
+ }
+
+ finfo.name = tag
+ return finfo, nil
+}
+
+// addFieldInfo adds finfo to tinfo.fields if there are no
+// conflicts, or if conflicts arise from previous fields that were
+// obtained from deeper embedded structures than finfo. In the latter
+// case, the conflicting entries are dropped.
+// A conflict occurs when the path (parent + name) to a field is
+// itself a prefix of another path, or when two paths match exactly.
+// It is okay for field paths to share a common, shorter prefix.
+func addFieldInfo(typ reflect.Type, tinfo *typeInfo, newf *fieldInfo) error {
+ var conflicts []int
+ // First, figure all conflicts. Most working code will have none.
+ for i := range tinfo.fields {
+ oldf := &tinfo.fields[i]
+ if newf.name == oldf.name {
+ conflicts = append(conflicts, i)
+ }
+ }
+
+ // Without conflicts, add the new field and return.
+ if conflicts == nil {
+ tinfo.fields = append(tinfo.fields, *newf)
+ return nil
+ }
+
+ // If any conflict is shallower, ignore the new field.
+ // This matches the Go field resolution on embedding.
+ for _, i := range conflicts {
+ if len(tinfo.fields[i].idx) < len(newf.idx) {
+ return nil
+ }
+ }
+
+ // Otherwise, the new field is shallower, and thus takes precedence,
+ // so drop the conflicting fields from tinfo and append the new one.
+ for c := len(conflicts) - 1; c >= 0; c-- {
+ i := conflicts[c]
+ copy(tinfo.fields[i:], tinfo.fields[i+1:])
+ tinfo.fields = tinfo.fields[:len(tinfo.fields)-1]
+ }
+ tinfo.fields = append(tinfo.fields, *newf)
+ return nil
+}
+
+// value returns v's field value corresponding to finfo.
+// It's equivalent to v.FieldByIndex(finfo.idx), but initializes
+// and dereferences pointers as necessary.
+func (finfo *fieldInfo) value(v reflect.Value) reflect.Value {
+ for i, x := range finfo.idx {
+ if i > 0 {
+ t := v.Type()
+ if t.Kind() == reflect.Ptr && t.Elem().Kind() == reflect.Struct {
+ if v.IsNil() {
+ v.Set(reflect.New(v.Type().Elem()))
+ }
+ v = v.Elem()
+ }
+ }
+ v = v.Field(x)
+ }
+ return v
+}
diff --git a/vendor/howett.net/plist/unmarshal.go b/vendor/howett.net/plist/unmarshal.go
new file mode 100644
index 0000000000..63b4b1d590
--- /dev/null
+++ b/vendor/howett.net/plist/unmarshal.go
@@ -0,0 +1,331 @@
+package plist
+
+import (
+ "encoding"
+ "fmt"
+ "reflect"
+ "runtime"
+ "time"
+)
+
+type incompatibleDecodeTypeError struct {
+ dest reflect.Type
+ src string // type name (from cfValue)
+}
+
+func (u *incompatibleDecodeTypeError) Error() string {
+ return fmt.Sprintf("plist: type mismatch: tried to decode plist type `%v' into value of type `%v'", u.src, u.dest)
+}
+
+var (
+ plistUnmarshalerType = reflect.TypeOf((*Unmarshaler)(nil)).Elem()
+ textUnmarshalerType = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem()
+ uidType = reflect.TypeOf(UID(0))
+)
+
+func isEmptyInterface(v reflect.Value) bool {
+ return v.Kind() == reflect.Interface && v.NumMethod() == 0
+}
+
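+// unmarshalPlistInterface feeds a value's custom UnmarshalPlist method,
+// converting the decoder's internal panics back into ordinary errors.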
+func (p *Decoder) unmarshalPlistInterface(pval cfValue, unmarshalable Unmarshaler) {
+ err := unmarshalable.UnmarshalPlist(func(i interface{}) (err error) {
+ defer func() {
+ if r := recover(); r != nil {
+ if _, ok := r.(runtime.Error); ok {
+ panic(r)
+ }
+ err = r.(error)
+ }
+ }()
+ p.unmarshal(pval, reflect.ValueOf(i))
+ return
+ })
+
+ if err != nil {
+ panic(err)
+ }
+}
+
+func (p *Decoder) unmarshalTextInterface(pval cfString, unmarshalable encoding.TextUnmarshaler) {
+ err := unmarshalable.UnmarshalText([]byte(pval))
+ if err != nil {
+ panic(err)
+ }
+}
+
+func (p *Decoder) unmarshalTime(pval cfDate, val reflect.Value) {
+ val.Set(reflect.ValueOf(time.Time(pval)))
+}
+
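+// unmarshalLaxString coerces a text-plist string into numeric, bool, or
+// time.Time destinations when the decoder is in lax mode.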
+func (p *Decoder) unmarshalLaxString(s string, val reflect.Value) {
+ switch val.Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ i := mustParseInt(s, 10, 64)
+ val.SetInt(i)
+ return
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ i := mustParseUint(s, 10, 64)
+ val.SetUint(i)
+ return
+ case reflect.Float32, reflect.Float64:
+ f := mustParseFloat(s, 64)
+ val.SetFloat(f)
+ return
+ case reflect.Bool:
+ b := mustParseBool(s)
+ val.SetBool(b)
+ return
+ case reflect.Struct:
+ if val.Type() == timeType {
+ t, err := time.Parse(textPlistTimeLayout, s)
+ if err != nil {
+ panic(err)
+ }
+ val.Set(reflect.ValueOf(t.In(time.UTC)))
+ return
+ }
+ fallthrough
+ default:
+ panic(&incompatibleDecodeTypeError{val.Type(), "string"})
+ }
+}
+
+func (p *Decoder) unmarshal(pval cfValue, val reflect.Value) {
+ if pval == nil {
+ return
+ }
+
+ if val.Kind() == reflect.Ptr {
+ if val.IsNil() {
+ val.Set(reflect.New(val.Type().Elem()))
+ }
+ val = val.Elem()
+ }
+
+ if isEmptyInterface(val) {
+ v := p.valueInterface(pval)
+ val.Set(reflect.ValueOf(v))
+ return
+ }
+
+ incompatibleTypeError := &incompatibleDecodeTypeError{val.Type(), pval.typeName()}
+
+ // time.Time implements encoding.TextUnmarshaler, but cfDate values must be
+ // stored directly rather than routed through the text interface
+ if date, ok := pval.(cfDate); ok {
+ if val.Type() == timeType {
+ p.unmarshalTime(date, val)
+ return
+ }
+ panic(incompatibleTypeError)
+ }
+
+ if receiver, can := implementsInterface(val, plistUnmarshalerType); can {
+ p.unmarshalPlistInterface(pval, receiver.(Unmarshaler))
+ return
+ }
+
+ if val.Type() != timeType {
+ if receiver, can := implementsInterface(val, textUnmarshalerType); can {
+ if str, ok := pval.(cfString); ok {
+ p.unmarshalTextInterface(str, receiver.(encoding.TextUnmarshaler))
+ } else {
+ panic(incompatibleTypeError)
+ }
+ return
+ }
+ }
+
+ typ := val.Type()
+
+ switch pval := pval.(type) {
+ case cfString:
+ if val.Kind() == reflect.String {
+ val.SetString(string(pval))
+ return
+ }
+ if p.lax {
+ p.unmarshalLaxString(string(pval), val)
+ return
+ }
+
+ panic(incompatibleTypeError)
+ case *cfNumber:
+ switch val.Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ val.SetInt(int64(pval.value))
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ val.SetUint(pval.value)
+ default:
+ panic(incompatibleTypeError)
+ }
+ case *cfReal:
+ if val.Kind() == reflect.Float32 || val.Kind() == reflect.Float64 {
+ // TODO: Consider warning on a downcast (storing a 64-bit value in a 32-bit reflect)
+ val.SetFloat(pval.value)
+ } else {
+ panic(incompatibleTypeError)
+ }
+ case cfBoolean:
+ if val.Kind() == reflect.Bool {
+ val.SetBool(bool(pval))
+ } else {
+ panic(incompatibleTypeError)
+ }
+ case cfData:
+ if val.Kind() != reflect.Slice && val.Kind() != reflect.Array {
+ panic(incompatibleTypeError)
+ }
+
+ if typ.Elem().Kind() != reflect.Uint8 {
+ panic(incompatibleTypeError)
+ }
+
+ b := []byte(pval)
+ switch val.Kind() {
+ case reflect.Slice:
+ val.SetBytes(b)
+ case reflect.Array:
+ if val.Len() < len(b) {
+ panic(fmt.Errorf("plist: attempted to unmarshal %d bytes into a byte array of size %d", len(b), val.Len()))
+ }
+ sval := reflect.ValueOf(b)
+ reflect.Copy(val, sval)
+ }
+ case cfUID:
+ if val.Type() == uidType {
+ val.SetUint(uint64(pval))
+ } else {
+ switch val.Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ val.SetInt(int64(pval))
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ val.SetUint(uint64(pval))
+ default:
+ panic(incompatibleTypeError)
+ }
+ }
+ case *cfArray:
+ p.unmarshalArray(pval, val)
+ case *cfDictionary:
+ p.unmarshalDictionary(pval, val)
+ }
+}
+
+func (p *Decoder) unmarshalArray(a *cfArray, val reflect.Value) {
+ var n int
+ if val.Kind() == reflect.Slice {
+ // Slice of element values.
+ // Grow slice.
+ cnt := len(a.values) + val.Len()
+ if cnt >= val.Cap() {
+ ncap := 2 * cnt
+ if ncap < 4 {
+ ncap = 4
+ }
+ grown := reflect.MakeSlice(val.Type(), val.Len(), ncap)
+ reflect.Copy(grown, val)
+ val.Set(grown)
+ }
+ n = val.Len()
+ val.SetLen(cnt)
+ } else if val.Kind() == reflect.Array {
+ if len(a.values) > val.Cap() {
+ panic(fmt.Errorf("plist: attempted to unmarshal %d values into an array of size %d", len(a.values), val.Cap()))
+ }
+ } else {
+ panic(&incompatibleDecodeTypeError{val.Type(), a.typeName()})
+ }
+
+ // Recur to read element into slice.
+ for _, sval := range a.values {
+ p.unmarshal(sval, val.Index(n))
+ n++
+ }
+}
+
+func (p *Decoder) unmarshalDictionary(dict *cfDictionary, val reflect.Value) {
+ typ := val.Type()
+ switch val.Kind() {
+ case reflect.Struct:
+ tinfo, err := getTypeInfo(typ)
+ if err != nil {
+ panic(err)
+ }
+
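+ // index the dictionary once, then decode each entry into its mapped field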
+ entries := make(map[string]cfValue, len(dict.keys))
+ for i, k := range dict.keys {
+ sval := dict.values[i]
+ entries[k] = sval
+ }
+
+ for _, finfo := range tinfo.fields {
+ p.unmarshal(entries[finfo.name], finfo.value(val))
+ }
+ case reflect.Map:
+ if val.IsNil() {
+ val.Set(reflect.MakeMap(typ))
+ }
+
+ for i, k := range dict.keys {
+ sval := dict.values[i]
+
+ keyv := reflect.ValueOf(k).Convert(typ.Key())
+ mapElem := reflect.New(typ.Elem()).Elem()
+
+ p.unmarshal(sval, mapElem)
+ val.SetMapIndex(keyv, mapElem)
+ }
+ default:
+ panic(&incompatibleDecodeTypeError{typ, dict.typeName()})
+ }
+}
+
+// The *Interface functions are modelled after encoding/json.
+func (p *Decoder) valueInterface(pval cfValue) interface{} {
+ switch pval := pval.(type) {
+ case cfString:
+ return string(pval)
+ case *cfNumber:
+ if pval.signed {
+ return int64(pval.value)
+ }
+ return pval.value
+ case *cfReal:
+ if pval.wide {
+ return pval.value
+ } else {
+ return float32(pval.value)
+ }
+ case cfBoolean:
+ return bool(pval)
+ case *cfArray:
+ return p.arrayInterface(pval)
+ case *cfDictionary:
+ return p.dictionaryInterface(pval)
+ case cfData:
+ return []byte(pval)
+ case cfDate:
+ return time.Time(pval)
+ case cfUID:
+ return UID(pval)
+ }
+ return nil
+}
+
+func (p *Decoder) arrayInterface(a *cfArray) []interface{} {
+ out := make([]interface{}, len(a.values))
+ for i, subv := range a.values {
+ out[i] = p.valueInterface(subv)
+ }
+ return out
+}
+
+func (p *Decoder) dictionaryInterface(dict *cfDictionary) map[string]interface{} {
+ out := make(map[string]interface{})
+ for i, k := range dict.keys {
+ subv := dict.values[i]
+ out[k] = p.valueInterface(subv)
+ }
+ return out
+}
diff --git a/vendor/howett.net/plist/util.go b/vendor/howett.net/plist/util.go
new file mode 100644
index 0000000000..d4e437a4fb
--- /dev/null
+++ b/vendor/howett.net/plist/util.go
@@ -0,0 +1,25 @@
+package plist
+
+import "io"
+
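+// countedWriter wraps an io.Writer and records the total bytes written.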
+type countedWriter struct {
+ io.Writer
+ nbytes int
+}
+
+func (w *countedWriter) Write(p []byte) (int, error) {
+ n, err := w.Writer.Write(p)
+ w.nbytes += n
+ return n, err
+}
+
+func (w *countedWriter) BytesWritten() int {
+ return w.nbytes
+}
+
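+// unsignedGetBase strips a leading 0x/0X prefix and reports base 16 for
+// hexadecimal literals, base 10 otherwise.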
+func unsignedGetBase(s string) (string, int) {
+ if len(s) > 1 && s[0] == '0' && (s[1] == 'x' || s[1] == 'X') {
+ return s[2:], 16
+ }
+ return s, 10
+}
diff --git a/vendor/howett.net/plist/xml_generator.go b/vendor/howett.net/plist/xml_generator.go
new file mode 100644
index 0000000000..30597c1601
--- /dev/null
+++ b/vendor/howett.net/plist/xml_generator.go
@@ -0,0 +1,178 @@
+package plist
+
+import (
+ "bufio"
+ "encoding/base64"
+ "encoding/xml"
+ "io"
+ "math"
+ "strconv"
+ "time"
+)
+
+const (
+ xmlHEADER string = `<?xml version="1.0" encoding="UTF-8"?>` + "\n"
+ xmlDOCTYPE = `<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">` + "\n"
+ xmlArrayTag = "array"
+ xmlDataTag = "data"
+ xmlDateTag = "date"
+ xmlDictTag = "dict"
+ xmlFalseTag = "false"
+ xmlIntegerTag = "integer"
+ xmlKeyTag = "key"
+ xmlPlistTag = "plist"
+ xmlRealTag = "real"
+ xmlStringTag = "string"
+ xmlTrueTag = "true"
+)
+
+func formatXMLFloat(f float64) string {
+ switch {
+ case math.IsInf(f, 1):
+ return "inf"
+ case math.IsInf(f, -1):
+ return "-inf"
+ case math.IsNaN(f):
+ return "nan"
+ }
+ return strconv.FormatFloat(f, 'g', -1, 64)
+}
+
+type xmlPlistGenerator struct {
+ *bufio.Writer
+
+ indent string
+ depth int
+ putNewline bool
+}
+
+func (p *xmlPlistGenerator) generateDocument(root cfValue) {
+ p.WriteString(xmlHEADER)
+ p.WriteString(xmlDOCTYPE)
+
+ p.openTag(`plist version="1.0"`)
+ p.writePlistValue(root)
+ p.closeTag(xmlPlistTag)
+ p.Flush()
+}
+
+func (p *xmlPlistGenerator) openTag(n string) {
+ p.writeIndent(1)
+ p.WriteByte('<')
+ p.WriteString(n)
+ p.WriteByte('>')
+}
+
+func (p *xmlPlistGenerator) closeTag(n string) {
+ p.writeIndent(-1)
+ p.WriteString("")
+ p.WriteString(n)
+ p.WriteByte('>')
+}
+
+func (p *xmlPlistGenerator) element(n string, v string) {
+ p.writeIndent(0)
+ if len(v) == 0 {
+ p.WriteByte('<')
+ p.WriteString(n)
+ p.WriteString("/>")
+ } else {
+ p.WriteByte('<')
+ p.WriteString(n)
+ p.WriteByte('>')
+
+ err := xml.EscapeText(p.Writer, []byte(v))
+ if err != nil {
+ panic(err)
+ }
+
+ p.WriteString("")
+ p.WriteString(n)
+ p.WriteByte('>')
+ }
+}
+
+func (p *xmlPlistGenerator) writeDictionary(dict *cfDictionary) {
+ dict.sort()
+ p.openTag(xmlDictTag)
+ for i, k := range dict.keys {
+ p.element(xmlKeyTag, k)
+ p.writePlistValue(dict.values[i])
+ }
+ p.closeTag(xmlDictTag)
+}
+
+func (p *xmlPlistGenerator) writeArray(a *cfArray) {
+ p.openTag(xmlArrayTag)
+ for _, v := range a.values {
+ p.writePlistValue(v)
+ }
+ p.closeTag(xmlArrayTag)
+}
+
+func (p *xmlPlistGenerator) writePlistValue(pval cfValue) {
+ if pval == nil {
+ return
+ }
+
+ switch pval := pval.(type) {
+ case cfString:
+ p.element(xmlStringTag, string(pval))
+ case *cfNumber:
+ if pval.signed {
+ p.element(xmlIntegerTag, strconv.FormatInt(int64(pval.value), 10))
+ } else {
+ p.element(xmlIntegerTag, strconv.FormatUint(pval.value, 10))
+ }
+ case *cfReal:
+ p.element(xmlRealTag, formatXMLFloat(pval.value))
+ case cfBoolean:
+ if bool(pval) {
+ p.element(xmlTrueTag, "")
+ } else {
+ p.element(xmlFalseTag, "")
+ }
+ case cfData:
+ p.element(xmlDataTag, base64.StdEncoding.EncodeToString([]byte(pval)))
+ case cfDate:
+ p.element(xmlDateTag, time.Time(pval).In(time.UTC).Format(time.RFC3339))
+ case *cfDictionary:
+ p.writeDictionary(pval)
+ case *cfArray:
+ p.writeArray(pval)
+ case cfUID:
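+ // XML plists have no native UID type; emit the UID's dictionary form instead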
+ p.writePlistValue(pval.toDict())
+ }
+}
+
+func (p *xmlPlistGenerator) writeIndent(delta int) {
+ if len(p.indent) == 0 {
+ return
+ }
+
+ if delta < 0 {
+ p.depth--
+ }
+
+ if p.putNewline {
+ // from encoding/xml/marshal.go; it seems to be intended
+ // to suppress the first newline.
+ p.WriteByte('\n')
+ } else {
+ p.putNewline = true
+ }
+ for i := 0; i < p.depth; i++ {
+ p.WriteString(p.indent)
+ }
+ if delta > 0 {
+ p.depth++
+ }
+}
+
+func (p *xmlPlistGenerator) Indent(i string) {
+ p.indent = i
+}
+
+func newXMLPlistGenerator(w io.Writer) *xmlPlistGenerator {
+ return &xmlPlistGenerator{Writer: bufio.NewWriter(w)}
+}
diff --git a/vendor/howett.net/plist/xml_parser.go b/vendor/howett.net/plist/xml_parser.go
new file mode 100644
index 0000000000..7415ef3e07
--- /dev/null
+++ b/vendor/howett.net/plist/xml_parser.go
@@ -0,0 +1,211 @@
+package plist
+
+import (
+ "encoding/base64"
+ "encoding/xml"
+ "errors"
+ "fmt"
+ "io"
+ "runtime"
+ "strings"
+ "time"
+)
+
+type xmlPlistParser struct {
+ reader io.Reader
+ xmlDecoder *xml.Decoder
+ whitespaceReplacer *strings.Replacer
+ ntags int
+}
+
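+// parseDocument reads tokens until the first start element and parses it,
+// recovering internal panics into the returned parseError.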
+func (p *xmlPlistParser) parseDocument() (pval cfValue, parseError error) {
+ defer func() {
+ if r := recover(); r != nil {
+ if _, ok := r.(runtime.Error); ok {
+ panic(r)
+ }
+ if _, ok := r.(invalidPlistError); ok {
+ parseError = r.(error)
+ } else {
+ // Wrap all non-invalid-plist errors.
+ parseError = plistParseError{"XML", r.(error)}
+ }
+ }
+ }()
+ for {
+ if token, err := p.xmlDecoder.Token(); err == nil {
+ if element, ok := token.(xml.StartElement); ok {
+ pval = p.parseXMLElement(element)
+ if p.ntags == 0 {
+ panic(invalidPlistError{"XML", errors.New("no elements encountered")})
+ }
+ return
+ }
+ } else {
+ // The first XML parse turned out to be invalid:
+ // we do not have an XML property list.
+ panic(invalidPlistError{"XML", err})
+ }
+ }
+}
+
+func (p *xmlPlistParser) parseXMLElement(element xml.StartElement) cfValue {
+ var charData xml.CharData
+ switch element.Name.Local {
+ case "plist":
+ p.ntags++
+ for {
+ token, err := p.xmlDecoder.Token()
+ if err != nil {
+ panic(err)
+ }
+
+ if el, ok := token.(xml.EndElement); ok && el.Name.Local == "plist" {
+ break
+ }
+
+ if el, ok := token.(xml.StartElement); ok {
+ return p.parseXMLElement(el)
+ }
+ }
+ return nil
+ case "string":
+ p.ntags++
+ err := p.xmlDecoder.DecodeElement(&charData, &element)
+ if err != nil {
+ panic(err)
+ }
+
+ return cfString(charData)
+ case "integer":
+ p.ntags++
+ err := p.xmlDecoder.DecodeElement(&charData, &element)
+ if err != nil {
+ panic(err)
+ }
+
+ s := string(charData)
+ if len(s) == 0 {
+ panic(errors.New("invalid empty "))
+ }
+
+ if s[0] == '-' {
+ s, base := unsignedGetBase(s[1:])
+ n := mustParseInt("-"+s, base, 64)
+ return &cfNumber{signed: true, value: uint64(n)}
+ } else {
+ s, base := unsignedGetBase(s)
+ n := mustParseUint(s, base, 64)
+ return &cfNumber{signed: false, value: n}
+ }
+ case "real":
+ p.ntags++
+ err := p.xmlDecoder.DecodeElement(&charData, &element)
+ if err != nil {
+ panic(err)
+ }
+
+ n := mustParseFloat(string(charData), 64)
+ return &cfReal{wide: true, value: n}
+ case "true", "false":
+ p.ntags++
+ p.xmlDecoder.Skip()
+
+ b := element.Name.Local == "true"
+ return cfBoolean(b)
+ case "date":
+ p.ntags++
+ err := p.xmlDecoder.DecodeElement(&charData, &element)
+ if err != nil {
+ panic(err)
+ }
+
+ t, err := time.ParseInLocation(time.RFC3339, string(charData), time.UTC)
+ if err != nil {
+ panic(err)
+ }
+
+ return cfDate(t)
+ case "data":
+ p.ntags++
+ err := p.xmlDecoder.DecodeElement(&charData, &element)
+ if err != nil {
+ panic(err)
+ }
+
+ str := p.whitespaceReplacer.Replace(string(charData))
+
+ l := base64.StdEncoding.DecodedLen(len(str))
+ bytes := make([]uint8, l)
+ l, err = base64.StdEncoding.Decode(bytes, []byte(str))
+ if err != nil {
+ panic(err)
+ }
+
+ return cfData(bytes[:l])
+ case "dict":
+ p.ntags++
+ var key *string
+ keys := make([]string, 0, 32)
+ values := make([]cfValue, 0, 32)
+ for {
+ token, err := p.xmlDecoder.Token()
+ if err != nil {
+ panic(err)
+ }
+
+ if el, ok := token.(xml.EndElement); ok && el.Name.Local == "dict" {
+ if key != nil {
+ panic(errors.New("missing value in dictionary"))
+ }
+ break
+ }
+
+ if el, ok := token.(xml.StartElement); ok {
+ if el.Name.Local == "key" {
+ var k string
+ if err := p.xmlDecoder.DecodeElement(&k, &el); err != nil {
+ panic(err)
+ }
+ key = &k
+ } else {
+ if key == nil {
+ panic(errors.New("missing key in dictionary"))
+ }
+ keys = append(keys, *key)
+ values = append(values, p.parseXMLElement(el))
+ key = nil
+ }
+ }
+ }
+
+ dict := &cfDictionary{keys: keys, values: values}
+ return dict.maybeUID(false)
+ case "array":
+ p.ntags++
+ values := make([]cfValue, 0, 10)
+ for {
+ token, err := p.xmlDecoder.Token()
+ if err != nil {
+ panic(err)
+ }
+
+ if el, ok := token.(xml.EndElement); ok && el.Name.Local == "array" {
+ break
+ }
+
+ if el, ok := token.(xml.StartElement); ok {
+ values = append(values, p.parseXMLElement(el))
+ }
+ }
+ return &cfArray{values}
+ }
+ err := fmt.Errorf("encountered unknown element %s", element.Name.Local)
+ if p.ntags == 0 {
+ // If our first XML tag is invalid, it might be an OpenStep data element, a la <abab> or <0101>
+ panic(invalidPlistError{"XML", err})
+ }
+ panic(err)
+}
+
+func newXMLPlistParser(r io.Reader) *xmlPlistParser {
+ return &xmlPlistParser{r, xml.NewDecoder(r), strings.NewReplacer("\t", "", "\n", "", " ", "", "\r", ""), 0}
+}
diff --git a/vendor/howett.net/plist/zerocopy.go b/vendor/howett.net/plist/zerocopy.go
new file mode 100644
index 0000000000..999f401b7e
--- /dev/null
+++ b/vendor/howett.net/plist/zerocopy.go
@@ -0,0 +1,20 @@
+// +build !appengine
+
+package plist
+
+import (
+ "reflect"
+ "unsafe"
+)
+
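+// zeroCopy8BitString returns a string that aliases buf's storage instead of
+// copying it; the caller must not mutate buf while the string is reachable.
+// (reflect.StringHeader is the pre-Go-1.20 idiom for this trick.)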
+func zeroCopy8BitString(buf []byte, off int, len int) string {
+ if len == 0 {
+ return ""
+ }
+
+ var s string
+ hdr := (*reflect.StringHeader)(unsafe.Pointer(&s))
+ hdr.Data = uintptr(unsafe.Pointer(&buf[off]))
+ hdr.Len = len
+ return s
+}
diff --git a/vendor/howett.net/plist/zerocopy_appengine.go b/vendor/howett.net/plist/zerocopy_appengine.go
new file mode 100644
index 0000000000..dbd9a1acfd
--- /dev/null
+++ b/vendor/howett.net/plist/zerocopy_appengine.go
@@ -0,0 +1,7 @@
+// +build appengine
+
+package plist
+
+func zeroCopy8BitString(buf []byte, off int, len int) string {
+ return string(buf[off : off+len])
+}
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 876b4e481f..36564732de 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -15,8 +15,8 @@ github.com/Azure/go-ansiterm/winterm
## explicit; go 1.12
github.com/Microsoft/go-winio
github.com/Microsoft/go-winio/pkg/guid
-# github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d
-## explicit
+# github.com/StackExchange/wmi v1.2.1
+## explicit; go 1.13
github.com/StackExchange/wmi
# github.com/VictoriaMetrics/metrics v1.18.1
## explicit; go 1.12
@@ -45,11 +45,14 @@ github.com/fatih/color
# github.com/gen2brain/dlgs v0.0.0-20210911090025-cbd38e821b98
## explicit
github.com/gen2brain/dlgs
+# github.com/ghodss/yaml v1.0.0
+## explicit
+github.com/ghodss/yaml
# github.com/go-chi/chi/v5 v5.0.8-0.20220103230436-7dbe9a0bd10f
## explicit; go 1.14
github.com/go-chi/chi/v5
github.com/go-chi/chi/v5/middleware
-# github.com/go-ole/go-ole v1.2.4
+# github.com/go-ole/go-ole v1.2.6
## explicit; go 1.12
github.com/go-ole/go-ole
github.com/go-ole/go-ole/oleutil
@@ -76,6 +79,32 @@ github.com/ivanpirog/coloredcobra
# github.com/james-barrow/golang-ipc v0.0.0-20210227130457-95e7cc81f5e2
## explicit; go 1.15
github.com/james-barrow/golang-ipc
+# github.com/jaypipes/ghw v0.9.0
+## explicit; go 1.15
+github.com/jaypipes/ghw
+github.com/jaypipes/ghw/pkg/baseboard
+github.com/jaypipes/ghw/pkg/bios
+github.com/jaypipes/ghw/pkg/block
+github.com/jaypipes/ghw/pkg/chassis
+github.com/jaypipes/ghw/pkg/context
+github.com/jaypipes/ghw/pkg/cpu
+github.com/jaypipes/ghw/pkg/gpu
+github.com/jaypipes/ghw/pkg/linuxdmi
+github.com/jaypipes/ghw/pkg/linuxpath
+github.com/jaypipes/ghw/pkg/marshal
+github.com/jaypipes/ghw/pkg/memory
+github.com/jaypipes/ghw/pkg/net
+github.com/jaypipes/ghw/pkg/option
+github.com/jaypipes/ghw/pkg/pci
+github.com/jaypipes/ghw/pkg/pci/address
+github.com/jaypipes/ghw/pkg/product
+github.com/jaypipes/ghw/pkg/snapshot
+github.com/jaypipes/ghw/pkg/topology
+github.com/jaypipes/ghw/pkg/unitutil
+github.com/jaypipes/ghw/pkg/util
+# github.com/jaypipes/pcidb v1.0.0
+## explicit; go 1.17
+github.com/jaypipes/pcidb
# github.com/json-iterator/go v1.1.12
## explicit; go 1.12
github.com/json-iterator/go
@@ -105,6 +134,9 @@ github.com/mattn/go-isatty
# github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d
## explicit
github.com/mgutz/ansi
+# github.com/mitchellh/go-homedir v1.1.0
+## explicit
+github.com/mitchellh/go-homedir
# github.com/mmcloughlin/avo v0.0.0-20200523190732-4439b6b2c061
## explicit; go 1.11
github.com/mmcloughlin/avo/attr
@@ -310,9 +342,15 @@ golang.zx2c4.com/wireguard/rwcancel
golang.zx2c4.com/wireguard/tun
golang.zx2c4.com/wireguard/tun/wintun
golang.zx2c4.com/wireguard/tun/wintun/memmod
+# gopkg.in/yaml.v2 v2.4.0
+## explicit; go 1.15
+gopkg.in/yaml.v2
# gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b
## explicit
gopkg.in/yaml.v3
+# howett.net/plist v1.0.0
+## explicit; go 1.12
+howett.net/plist
# nhooyr.io/websocket v1.8.2
## explicit; go 1.13
nhooyr.io/websocket