diff --git a/ci_scripts/run-pkg-tests.sh b/ci_scripts/run-pkg-tests.sh index 40555c455..cb70c448c 100755 --- a/ci_scripts/run-pkg-tests.sh +++ b/ci_scripts/run-pkg-tests.sh @@ -11,22 +11,22 @@ go clean -testcache &> /dev/null || go test -race -tags no_ci -cover -timeout=5m go clean -testcache &> /dev/null || go test -race -tags no_ci -cover -timeout=5m github.com/SkycoinProject/skywire-mainnet/pkg/app -run TestProtocol >> ./logs/pkg/TestProtocol.log go clean -testcache &> /dev/null || go test -race -tags no_ci -cover -timeout=5m github.com/SkycoinProject/skywire-mainnet/pkg/app -run TestProtocolParallel >> ./logs/pkg/TestProtocolParallel.log -go clean -testcache &> /dev/null || go test -race -tags no_ci -cover -timeout=5m github.com/SkycoinProject/skywire-mainnet/pkg/hypervisor -run TestNewNode >> ./logs/pkg/TestNewNode.log +go clean -testcache &> /dev/null || go test -race -tags no_ci -cover -timeout=5m github.com/SkycoinProject/skywire-mainnet/pkg/hypervisor -run TestNewVisor >> ./logs/pkg/TestNewVisor.log -go clean -testcache &> /dev/null || go test -race -tags no_ci -cover -timeout=5m github.com/SkycoinProject/skywire-mainnet/pkg/node -run TestDmsgDiscovery >> ./logs/pkg/TestDmsgDiscovery.log -go clean -testcache &> /dev/null || go test -race -tags no_ci -cover -timeout=5m github.com/SkycoinProject/skywire-mainnet/pkg/node -run TestTransportDiscovery >> ./logs/pkg/TestTransportDiscovery.log -go clean -testcache &> /dev/null || go test -race -tags no_ci -cover -timeout=5m github.com/SkycoinProject/skywire-mainnet/pkg/node -run TestTransportLogStore >> ./logs/pkg/TestTransportLogStore.log -go clean -testcache &> /dev/null || go test -race -tags no_ci -cover -timeout=5m github.com/SkycoinProject/skywire-mainnet/pkg/node -run TestRoutingTable >> ./logs/pkg/TestRoutingTable.log -go clean -testcache &> /dev/null || go test -race -tags no_ci -cover -timeout=5m github.com/SkycoinProject/skywire-mainnet/pkg/node -run TestAppsConfig >> ./logs/pkg/TestAppsConfig.log -go clean -testcache &> /dev/null || go test -race -tags no_ci -cover -timeout=5m github.com/SkycoinProject/skywire-mainnet/pkg/node -run TestAppsDir >> ./logs/pkg/TestAppsDir.log -go clean -testcache &> /dev/null || go test -race -tags no_ci -cover -timeout=5m github.com/SkycoinProject/skywire-mainnet/pkg/node -run TestLocalDir >> ./logs/pkg/TestLocalDir.log -go clean -testcache &> /dev/null || go test -race -tags no_ci -cover -timeout=5m github.com/SkycoinProject/skywire-mainnet/pkg/node -run TestNewNode >> ./logs/pkg/TestNewNode.log -go clean -testcache &> /dev/null || go test -race -tags no_ci -cover -timeout=5m github.com/SkycoinProject/skywire-mainnet/pkg/node -run TestNodeStartClose >> ./logs/pkg/TestNodeStartClose.log -go clean -testcache &> /dev/null || go test -race -tags no_ci -cover -timeout=5m github.com/SkycoinProject/skywire-mainnet/pkg/node -run TestNodeSpawnApp >> ./logs/pkg/TestNodeSpawnApp.log -go clean -testcache &> /dev/null || go test -race -tags no_ci -cover -timeout=5m github.com/SkycoinProject/skywire-mainnet/pkg/node -run TestNodeSpawnAppValidations >> ./logs/pkg/TestNodeSpawnAppValidations.log -go clean -testcache &> /dev/null || go test -race -tags no_ci -cover -timeout=5m github.com/SkycoinProject/skywire-mainnet/pkg/node -run TestListApps >> ./logs/pkg/TestListApps.log -go clean -testcache &> /dev/null || go test -race -tags no_ci -cover -timeout=5m github.com/SkycoinProject/skywire-mainnet/pkg/node -run TestStartStopApp >> ./logs/pkg/TestStartStopApp.log -go clean -testcache &> /dev/null || go test -race 
-tags no_ci -cover -timeout=5m github.com/SkycoinProject/skywire-mainnet/pkg/node -run TestRPC >> ./logs/pkg/TestRPC.log +go clean -testcache &> /dev/null || go test -race -tags no_ci -cover -timeout=5m github.com/SkycoinProject/skywire-mainnet/pkg/visor -run TestDmsgDiscovery >> ./logs/pkg/TestDmsgDiscovery.log +go clean -testcache &> /dev/null || go test -race -tags no_ci -cover -timeout=5m github.com/SkycoinProject/skywire-mainnet/pkg/visor -run TestTransportDiscovery >> ./logs/pkg/TestTransportDiscovery.log +go clean -testcache &> /dev/null || go test -race -tags no_ci -cover -timeout=5m github.com/SkycoinProject/skywire-mainnet/pkg/visor -run TestTransportLogStore >> ./logs/pkg/TestTransportLogStore.log +go clean -testcache &> /dev/null || go test -race -tags no_ci -cover -timeout=5m github.com/SkycoinProject/skywire-mainnet/pkg/visor -run TestRoutingTable >> ./logs/pkg/TestRoutingTable.log +go clean -testcache &> /dev/null || go test -race -tags no_ci -cover -timeout=5m github.com/SkycoinProject/skywire-mainnet/pkg/visor -run TestAppsConfig >> ./logs/pkg/TestAppsConfig.log +go clean -testcache &> /dev/null || go test -race -tags no_ci -cover -timeout=5m github.com/SkycoinProject/skywire-mainnet/pkg/visor -run TestAppsDir >> ./logs/pkg/TestAppsDir.log +go clean -testcache &> /dev/null || go test -race -tags no_ci -cover -timeout=5m github.com/SkycoinProject/skywire-mainnet/pkg/visor -run TestLocalDir >> ./logs/pkg/TestLocalDir.log +go clean -testcache &> /dev/null || go test -race -tags no_ci -cover -timeout=5m github.com/SkycoinProject/skywire-mainnet/pkg/visor -run TestNewVisor >> ./logs/pkg/TestNewVisor.log +go clean -testcache &> /dev/null || go test -race -tags no_ci -cover -timeout=5m github.com/SkycoinProject/skywire-mainnet/pkg/visor -run TestVisorStartClose >> ./logs/pkg/TestVisorStartClose.log +go clean -testcache &> /dev/null || go test -race -tags no_ci -cover -timeout=5m github.com/SkycoinProject/skywire-mainnet/pkg/visor -run TestVisorSpawnApp >> ./logs/pkg/TestVisorSpawnApp.log +go clean -testcache &> /dev/null || go test -race -tags no_ci -cover -timeout=5m github.com/SkycoinProject/skywire-mainnet/pkg/visor -run TestVisorSpawnAppValidations >> ./logs/pkg/TestVisorSpawnAppValidations.log +go clean -testcache &> /dev/null || go test -race -tags no_ci -cover -timeout=5m github.com/SkycoinProject/skywire-mainnet/pkg/visor -run TestListApps >> ./logs/pkg/TestListApps.log +go clean -testcache &> /dev/null || go test -race -tags no_ci -cover -timeout=5m github.com/SkycoinProject/skywire-mainnet/pkg/visor -run TestStartStopApp >> ./logs/pkg/TestStartStopApp.log +go clean -testcache &> /dev/null || go test -race -tags no_ci -cover -timeout=5m github.com/SkycoinProject/skywire-mainnet/pkg/visor -run TestRPC >> ./logs/pkg/TestRPC.log go clean -testcache &> /dev/null || go test -race -tags no_ci -cover -timeout=5m github.com/SkycoinProject/skywire-mainnet/pkg/router -run TestAppManagerInit >> ./logs/pkg/TestAppManagerInit.log go clean -testcache &> /dev/null || go test -race -tags no_ci -cover -timeout=5m github.com/SkycoinProject/skywire-mainnet/pkg/router -run TestAppManagerSetupLoop >> ./logs/pkg/TestAppManagerSetupLoop.log diff --git a/cmd/apps/helloworld/helloworld.go b/cmd/apps/helloworld/helloworld.go index 3c2f73197..a8854f2cb 100644 --- a/cmd/apps/helloworld/helloworld.go +++ b/cmd/apps/helloworld/helloworld.go @@ -55,7 +55,7 @@ func main() { log.Printf("Message from %s: %s\n", conn.RemoteAddr().String(), string(buf)) if _, err := conn.Write([]byte("pong")); err != nil { 
- log.Printf("Failed to write to a remote node: %v\n", err) + log.Printf("Failed to write to a remote visor: %v\n", err) // TODO: close conn } }() @@ -77,7 +77,7 @@ func main() { } if _, err := conn.Write([]byte("ping")); err != nil { - log.Fatalf("Failed to write to a remote node: %v\n", err) + log.Fatalf("Failed to write to a remote visor: %v\n", err) } buf := make([]byte, 4) diff --git a/cmd/apps/skychat/README.md b/cmd/apps/skychat/README.md index 374c9241d..0aa9b2cde 100644 --- a/cmd/apps/skychat/README.md +++ b/cmd/apps/skychat/README.md @@ -8,7 +8,7 @@ Chat only supports one WEB client user at a time. ## Local setup -Create 2 node config files: +Create 2 visor config files: `skywire1.json` @@ -41,7 +41,7 @@ Create 2 node config files: } ``` -Compile binaries and start 2 nodes: +Compile binaries and start 2 visors: ```bash $ go build -o apps/skychat.v1.0 ./cmd/apps/skychat diff --git a/cmd/apps/skysocks-client/README.md b/cmd/apps/skysocks-client/README.md index 4beef86dc..16d890f6d 100644 --- a/cmd/apps/skysocks-client/README.md +++ b/cmd/apps/skysocks-client/README.md @@ -2,7 +2,7 @@ `skysocks-client` app implements client for the SOCKS5 app. -It opens persistent `skywire` connection to the configured remote node +It opens persistent `skywire` connection to the configured remote visor and local TCP port, all incoming TCP traffics is forwarded to the ~skywire~ connection. diff --git a/cmd/apps/skysocks/README.md b/cmd/apps/skysocks/README.md index 7e7432982..f69f0e60a 100644 --- a/cmd/apps/skysocks/README.md +++ b/cmd/apps/skysocks/README.md @@ -10,7 +10,7 @@ If none are provided, the server does not require authentication. ## Local setup -Create 2 node config files: +Create 2 visor config files: - `skywire1.json` @@ -44,7 +44,7 @@ Create 2 node config files: } ``` -Compile binaries and start 2 nodes: +Compile binaries and start 2 visors: ```sh $ go build -o apps/skysocks.v1.0 ./cmd/apps/skysocks @@ -53,7 +53,7 @@ $ ./skywire-visor skywire1.json $ ./skywire-visor skywire2.json ``` -You should be able to connect to a secondary node via `curl`: +You should be able to connect to a secondary visor via `curl`: ```sh $ curl -v -x socks5://123456:@localhost:1080 https://api.ipify.org diff --git a/cmd/hypervisor/README.md b/cmd/hypervisor/README.md index 3a9a7b2e3..e45a70cb3 100644 --- a/cmd/hypervisor/README.md +++ b/cmd/hypervisor/README.md @@ -1,6 +1,6 @@ # Hypervisor -Hypervisor exposes node management operations via web API. +Hypervisor exposes visor management operations via web API. 
**Generate config file:** diff --git a/cmd/hypervisor/commands/root.go b/cmd/hypervisor/commands/root.go index 295eb4f4b..ae39b5cd7 100644 --- a/cmd/hypervisor/commands/root.go +++ b/cmd/hypervisor/commands/root.go @@ -25,7 +25,7 @@ var ( configPath string mock bool mockEnableAuth bool - mockNodes int + mockVisors int mockMaxTps int mockMaxRoutes int ) @@ -35,15 +35,15 @@ func init() { rootCmd.Flags().StringVarP(&configPath, "config", "c", "./hypervisor-config.json", "hypervisor config path") rootCmd.Flags().BoolVarP(&mock, "mock", "m", false, "whether to run hypervisor with mock data") rootCmd.Flags().BoolVar(&mockEnableAuth, "mock-enable-auth", false, "whether to enable user management in mock mode") - rootCmd.Flags().IntVar(&mockNodes, "mock-nodes", 5, "number of app nodes to have in mock mode") - rootCmd.Flags().IntVar(&mockMaxTps, "mock-max-tps", 10, "max number of transports per mock app node") - rootCmd.Flags().IntVar(&mockMaxRoutes, "mock-max-routes", 30, "max number of routes per node") + rootCmd.Flags().IntVar(&mockVisors, "mock-visors", 5, "number of app visors to have in mock mode") + rootCmd.Flags().IntVar(&mockMaxTps, "mock-max-tps", 10, "max number of transports per mock app visor") + rootCmd.Flags().IntVar(&mockMaxRoutes, "mock-max-routes", 30, "max number of routes per visor") } // nolint:gochecknoglobals var rootCmd = &cobra.Command{ Use: "hypervisor", - Short: "Manages Skywire App Nodes", + Short: "Manages Skywire App Visors", Run: func(_ *cobra.Command, args []string) { if _, err := buildinfo.Get().WriteTo(os.Stdout); err != nil { log.Printf("Failed to output build info: %v", err) @@ -66,7 +66,7 @@ var rootCmd = &cobra.Command{ rpcAddr = config.Interfaces.RPCAddr ) - m, err := hypervisor.NewNode(config) + m, err := hypervisor.NewVisor(config) if err != nil { log.Fatalln("Failed to start hypervisor:", err) } @@ -93,10 +93,10 @@ var rootCmd = &cobra.Command{ if mock { err := m.AddMockData(hypervisor.MockConfig{ - Nodes: mockNodes, - MaxTpsPerNode: mockMaxTps, - MaxRoutesPerNode: mockMaxRoutes, - EnableAuth: mockEnableAuth, + Visors: mockVisors, + MaxTpsPerVisor: mockMaxTps, + MaxRoutesPerVisor: mockMaxRoutes, + EnableAuth: mockEnableAuth, }) if err != nil { log.Fatalln("Failed to add mock data:", err) diff --git a/cmd/hypervisor/hypervisor.postman_collection.json b/cmd/hypervisor/hypervisor.postman_collection.json index 665b29e9a..3a87b5e03 100644 --- a/cmd/hypervisor/hypervisor.postman_collection.json +++ b/cmd/hypervisor/hypervisor.postman_collection.json @@ -6,7 +6,7 @@ }, "item": [ { - "name": "/api/nodes", + "name": "/api/visors", "request": { "method": "GET", "header": [], @@ -15,7 +15,7 @@ "raw": "" }, "url": { - "raw": "http://localhost:8080/api/nodes", + "raw": "http://localhost:8080/api/visors", "protocol": "http", "host": [ "localhost" @@ -23,14 +23,14 @@ "port": "8080", "path": [ "api", - "nodes" + "visors" ] }, - "description": "Provides a summary of all connected app nodes." + "description": "Provides a summary of all connected app visors." 
}, "response": [ { - "name": "/api/nodes", + "name": "/api/visors", "originalRequest": { "method": "GET", "header": [], @@ -39,7 +39,7 @@ "raw": "" }, "url": { - "raw": "http://localhost:8080/api/nodes", + "raw": "http://localhost:8080/api/visors", "protocol": "http", "host": [ "localhost" @@ -47,7 +47,7 @@ "port": "8080", "path": [ "api", - "nodes" + "visors" ] } }, @@ -74,7 +74,7 @@ ] }, { - "name": "/api/nodes/{pk}", + "name": "/api/visors/{pk}", "request": { "method": "GET", "header": [], @@ -83,7 +83,7 @@ "raw": "" }, "url": { - "raw": "http://localhost:8080/api/nodes/021c535e45756c63151820c8f31bfbb0efd6d7d49305e133e1650aae889d60ff02", + "raw": "http://localhost:8080/api/visors/021c535e45756c63151820c8f31bfbb0efd6d7d49305e133e1650aae889d60ff02", "protocol": "http", "host": [ "localhost" @@ -91,7 +91,7 @@ "port": "8080", "path": [ "api", - "nodes", + "visors", "021c535e45756c63151820c8f31bfbb0efd6d7d49305e133e1650aae889d60ff02" ], "query": [ @@ -102,11 +102,11 @@ } ] }, - "description": "Provides a summary of a given connected app node of public key." + "description": "Provides a summary of a given connected app visor of public key." }, "response": [ { - "name": "/api/nodes/:pk", + "name": "/api/visors/:pk", "originalRequest": { "method": "GET", "header": [], @@ -115,7 +115,7 @@ "raw": "" }, "url": { - "raw": "http://localhost:8080/api/nodes/02500488fc25814e55e6c740537ed14ca677aebe8a883681bae718a36516a7b0a2", + "raw": "http://localhost:8080/api/visors/02500488fc25814e55e6c740537ed14ca677aebe8a883681bae718a36516a7b0a2", "protocol": "http", "host": [ "localhost" @@ -123,7 +123,7 @@ "port": "8080", "path": [ "api", - "nodes", + "visors", "02500488fc25814e55e6c740537ed14ca677aebe8a883681bae718a36516a7b0a2" ], "query": [ @@ -158,7 +158,7 @@ ] }, { - "name": "/api/nodes/{pk}/apps", + "name": "/api/visors/{pk}/apps", "request": { "method": "GET", "header": [], @@ -167,7 +167,7 @@ "raw": "" }, "url": { - "raw": "http://localhost:8080/api/nodes/021c535e45756c63151820c8f31bfbb0efd6d7d49305e133e1650aae889d60ff02/apps", + "raw": "http://localhost:8080/api/visors/021c535e45756c63151820c8f31bfbb0efd6d7d49305e133e1650aae889d60ff02/apps", "protocol": "http", "host": [ "localhost" @@ -175,16 +175,16 @@ "port": "8080", "path": [ "api", - "nodes", + "visors", "021c535e45756c63151820c8f31bfbb0efd6d7d49305e133e1650aae889d60ff02", "apps" ] }, - "description": "Provides a summary of an AppNode's apps." + "description": "Provides a summary of an AppVisor's apps." 
}, "response": [ { - "name": "/api/nodes/:pk/apps", + "name": "/api/visors/:pk/apps", "originalRequest": { "method": "GET", "header": [], @@ -193,7 +193,7 @@ "raw": "" }, "url": { - "raw": "http://localhost:8080/api/nodes/02500488fc25814e55e6c740537ed14ca677aebe8a883681bae718a36516a7b0a2/apps", + "raw": "http://localhost:8080/api/visors/02500488fc25814e55e6c740537ed14ca677aebe8a883681bae718a36516a7b0a2/apps", "protocol": "http", "host": [ "localhost" @@ -201,7 +201,7 @@ "port": "8080", "path": [ "api", - "nodes", + "visors", "02500488fc25814e55e6c740537ed14ca677aebe8a883681bae718a36516a7b0a2", "apps" ] @@ -230,7 +230,7 @@ ] }, { - "name": "/api/nodes/{pk}/apps/{app}", + "name": "/api/visors/{pk}/apps/{app}", "request": { "method": "GET", "header": [], @@ -239,7 +239,7 @@ "raw": "" }, "url": { - "raw": "http://localhost:8080/api/nodes/021c535e45756c63151820c8f31bfbb0efd6d7d49305e133e1650aae889d60ff02/apps/foo.v1.0", + "raw": "http://localhost:8080/api/visors/021c535e45756c63151820c8f31bfbb0efd6d7d49305e133e1650aae889d60ff02/apps/foo.v1.0", "protocol": "http", "host": [ "localhost" @@ -247,17 +247,17 @@ "port": "8080", "path": [ "api", - "nodes", + "visors", "021c535e45756c63151820c8f31bfbb0efd6d7d49305e133e1650aae889d60ff02", "apps", "foo.v1.0" ] }, - "description": "Starts an app on an AppNode." + "description": "Starts an app on an AppVisor." }, "response": [ { - "name": "/api/nodes/:pk/apps/:app/start", + "name": "/api/visors/:pk/apps/:app/start", "originalRequest": { "method": "POST", "header": [], @@ -266,7 +266,7 @@ "raw": "" }, "url": { - "raw": "http://localhost:8080/api/nodes/02500488fc25814e55e6c740537ed14ca677aebe8a883681bae718a36516a7b0a2/apps/foo.v1.0/start", + "raw": "http://localhost:8080/api/visors/02500488fc25814e55e6c740537ed14ca677aebe8a883681bae718a36516a7b0a2/apps/foo.v1.0/start", "protocol": "http", "host": [ "localhost" @@ -274,7 +274,7 @@ "port": "8080", "path": [ "api", - "nodes", + "visors", "02500488fc25814e55e6c740537ed14ca677aebe8a883681bae718a36516a7b0a2", "apps", "foo.v1.0", @@ -300,12 +300,12 @@ } ], "cookie": [], - "body": "{\n \"node\": \"02500488fc25814e55e6c740537ed14ca677aebe8a883681bae718a36516a7b0a2\",\n \"app\": \"foo.v1.0\",\n \"command\": \"StartApp\",\n \"success\": true\n}" + "body": "{\n \"visor\": \"02500488fc25814e55e6c740537ed14ca677aebe8a883681bae718a36516a7b0a2\",\n \"app\": \"foo.v1.0\",\n \"command\": \"StartApp\",\n \"success\": true\n}" } ] }, { - "name": "/api/nodes/{pk}/apps/{app}", + "name": "/api/visors/{pk}/apps/{app}", "request": { "method": "PUT", "header": [ @@ -321,7 +321,7 @@ "raw": "{\n\t\"autostart\": true,\n\t\"status\": 1\n}" }, "url": { - "raw": "http://localhost:8080/api/nodes/023ab9f45c0eb3625f9848a9ff6822c6d0965a94d3e7955f1869f38153df0e5b64/apps/foo.v1.0", + "raw": "http://localhost:8080/api/visors/023ab9f45c0eb3625f9848a9ff6822c6d0965a94d3e7955f1869f38153df0e5b64/apps/foo.v1.0", "protocol": "http", "host": [ "localhost" @@ -329,17 +329,17 @@ "port": "8080", "path": [ "api", - "nodes", + "visors", "023ab9f45c0eb3625f9848a9ff6822c6d0965a94d3e7955f1869f38153df0e5b64", "apps", "foo.v1.0" ] }, - "description": "Starts an app on an AppNode." + "description": "Starts an app on an AppVisor." 
}, "response": [ { - "name": "/api/nodes/{pk}/apps/{app}", + "name": "/api/visors/{pk}/apps/{app}", "originalRequest": { "method": "PUT", "header": [ @@ -355,7 +355,7 @@ "raw": "{\n\t\"autostart\": true,\n\t\"status\": 1\n}" }, "url": { - "raw": "http://localhost:8080/api/nodes/023ab9f45c0eb3625f9848a9ff6822c6d0965a94d3e7955f1869f38153df0e5b64/apps/foo.v1.0", + "raw": "http://localhost:8080/api/visors/023ab9f45c0eb3625f9848a9ff6822c6d0965a94d3e7955f1869f38153df0e5b64/apps/foo.v1.0", "protocol": "http", "host": [ "localhost" @@ -363,7 +363,7 @@ "port": "8080", "path": [ "api", - "nodes", + "visors", "023ab9f45c0eb3625f9848a9ff6822c6d0965a94d3e7955f1869f38153df0e5b64", "apps", "foo.v1.0" @@ -393,7 +393,7 @@ ] }, { - "name": "/api/nodes/{pk}/transport-types", + "name": "/api/visors/{pk}/transport-types", "request": { "method": "GET", "header": [], @@ -402,7 +402,7 @@ "raw": "" }, "url": { - "raw": "http://localhost:8080/api/nodes/023ab9f45c0eb3625f9848a9ff6822c6d0965a94d3e7955f1869f38153df0e5b64/transport-types", + "raw": "http://localhost:8080/api/visors/023ab9f45c0eb3625f9848a9ff6822c6d0965a94d3e7955f1869f38153df0e5b64/transport-types", "protocol": "http", "host": [ "localhost" @@ -410,16 +410,16 @@ "port": "8080", "path": [ "api", - "nodes", + "visors", "023ab9f45c0eb3625f9848a9ff6822c6d0965a94d3e7955f1869f38153df0e5b64", "transport-types" ] }, - "description": "Lists supported transport types of the given AppNode." + "description": "Lists supported transport types of the given AppVisor." }, "response": [ { - "name": "/api/nodes/:pk/transport-types", + "name": "/api/visors/:pk/transport-types", "originalRequest": { "method": "GET", "header": [], @@ -428,7 +428,7 @@ "raw": "" }, "url": { - "raw": "http://localhost:8080/api/nodes/039337a306ffbd6a7495f79b65aec91ce65756e4fd1cb2cd726840b2eee4fa59c2/transport-types", + "raw": "http://localhost:8080/api/visors/039337a306ffbd6a7495f79b65aec91ce65756e4fd1cb2cd726840b2eee4fa59c2/transport-types", "protocol": "http", "host": [ "localhost" @@ -436,7 +436,7 @@ "port": "8080", "path": [ "api", - "nodes", + "visors", "039337a306ffbd6a7495f79b65aec91ce65756e4fd1cb2cd726840b2eee4fa59c2", "transport-types" ] @@ -465,7 +465,7 @@ ] }, { - "name": "/api/nodes/{pk}/transports", + "name": "/api/visors/{pk}/transports", "request": { "method": "GET", "header": [], @@ -474,7 +474,7 @@ "raw": "" }, "url": { - "raw": "http://localhost:8080/api/nodes/023ab9f45c0eb3625f9848a9ff6822c6d0965a94d3e7955f1869f38153df0e5b64/transports?logs=true", + "raw": "http://localhost:8080/api/visors/023ab9f45c0eb3625f9848a9ff6822c6d0965a94d3e7955f1869f38153df0e5b64/transports?logs=true", "protocol": "http", "host": [ "localhost" @@ -482,7 +482,7 @@ "port": "8080", "path": [ "api", - "nodes", + "visors", "023ab9f45c0eb3625f9848a9ff6822c6d0965a94d3e7955f1869f38153df0e5b64", "transports" ], @@ -494,11 +494,11 @@ } ] }, - "description": "List transports of given AppNode." + "description": "List transports of given AppVisor." 
}, "response": [ { - "name": "/api/nodes/:pk/transports", + "name": "/api/visors/:pk/transports", "originalRequest": { "method": "GET", "header": [], @@ -507,7 +507,7 @@ "raw": "" }, "url": { - "raw": "http://localhost:8080/api/nodes/039337a306ffbd6a7495f79b65aec91ce65756e4fd1cb2cd726840b2eee4fa59c2/transports?logs=true", + "raw": "http://localhost:8080/api/visors/039337a306ffbd6a7495f79b65aec91ce65756e4fd1cb2cd726840b2eee4fa59c2/transports?logs=true", "protocol": "http", "host": [ "localhost" @@ -515,7 +515,7 @@ "port": "8080", "path": [ "api", - "nodes", + "visors", "039337a306ffbd6a7495f79b65aec91ce65756e4fd1cb2cd726840b2eee4fa59c2", "transports" ], @@ -551,7 +551,7 @@ ] }, { - "name": "/api/nodes/{pk}/transports", + "name": "/api/visors/{pk}/transports", "request": { "method": "POST", "header": [ @@ -567,7 +567,7 @@ "raw": "{\n\t\"remote_pk\": \"03c497380efd87e19208bb484ee322ede1e091b2a0b653e6d25475f641602376a9\",\n\t\"transport_type\": \"native\",\n\t\"public\": true\n}" }, "url": { - "raw": "http://localhost:8080/api/nodes/023ab9f45c0eb3625f9848a9ff6822c6d0965a94d3e7955f1869f38153df0e5b64/transports", + "raw": "http://localhost:8080/api/visors/023ab9f45c0eb3625f9848a9ff6822c6d0965a94d3e7955f1869f38153df0e5b64/transports", "protocol": "http", "host": [ "localhost" @@ -575,16 +575,16 @@ "port": "8080", "path": [ "api", - "nodes", + "visors", "023ab9f45c0eb3625f9848a9ff6822c6d0965a94d3e7955f1869f38153df0e5b64", "transports" ] }, - "description": "Adds a transport to a given AppNode." + "description": "Adds a transport to a given AppVisor." }, "response": [ { - "name": "POST /api/nodes/transports", + "name": "POST /api/visors/transports", "originalRequest": { "method": "POST", "header": [ @@ -600,7 +600,7 @@ "raw": "{\n\t\"remote_pk\": \"03c497380efd87e19208bb484ee322ede1e091b2a0b653e6d25475f641602376a9\",\n\t\"transport_type\": \"native\",\n\t\"public\": true\n}" }, "url": { - "raw": "http://localhost:8080/api/nodes/039337a306ffbd6a7495f79b65aec91ce65756e4fd1cb2cd726840b2eee4fa59c2/transports", + "raw": "http://localhost:8080/api/visors/039337a306ffbd6a7495f79b65aec91ce65756e4fd1cb2cd726840b2eee4fa59c2/transports", "protocol": "http", "host": [ "localhost" @@ -608,7 +608,7 @@ "port": "8080", "path": [ "api", - "nodes", + "visors", "039337a306ffbd6a7495f79b65aec91ce65756e4fd1cb2cd726840b2eee4fa59c2", "transports" ] @@ -637,7 +637,7 @@ ] }, { - "name": "/api/nodes/{pk}/transports/{tid}", + "name": "/api/visors/{pk}/transports/{tid}", "request": { "method": "GET", "header": [], @@ -646,7 +646,7 @@ "raw": "" }, "url": { - "raw": "http://localhost:8080/api/nodes/023ab9f45c0eb3625f9848a9ff6822c6d0965a94d3e7955f1869f38153df0e5b64/transports/70836b44-f6e5-4c17-a5e8-e1cbef89a10f", + "raw": "http://localhost:8080/api/visors/023ab9f45c0eb3625f9848a9ff6822c6d0965a94d3e7955f1869f38153df0e5b64/transports/70836b44-f6e5-4c17-a5e8-e1cbef89a10f", "protocol": "http", "host": [ "localhost" @@ -654,17 +654,17 @@ "port": "8080", "path": [ "api", - "nodes", + "visors", "023ab9f45c0eb3625f9848a9ff6822c6d0965a94d3e7955f1869f38153df0e5b64", "transports", "70836b44-f6e5-4c17-a5e8-e1cbef89a10f" ] }, - "description": "Obtains summary of transport of given TransportID and AppNode." + "description": "Obtains summary of transport of given TransportID and AppVisor." 
}, "response": [ { - "name": "/api/nodes/:pk/transports/:tid", + "name": "/api/visors/:pk/transports/:tid", "originalRequest": { "method": "GET", "header": [], @@ -673,7 +673,7 @@ "raw": "" }, "url": { - "raw": "http://localhost:8080/api/nodes/039337a306ffbd6a7495f79b65aec91ce65756e4fd1cb2cd726840b2eee4fa59c2/transports/2ff2d608-fe14-4c17-938c-3af8afd053ae", + "raw": "http://localhost:8080/api/visors/039337a306ffbd6a7495f79b65aec91ce65756e4fd1cb2cd726840b2eee4fa59c2/transports/2ff2d608-fe14-4c17-938c-3af8afd053ae", "protocol": "http", "host": [ "localhost" @@ -681,7 +681,7 @@ "port": "8080", "path": [ "api", - "nodes", + "visors", "039337a306ffbd6a7495f79b65aec91ce65756e4fd1cb2cd726840b2eee4fa59c2", "transports", "2ff2d608-fe14-4c17-938c-3af8afd053ae" @@ -711,7 +711,7 @@ ] }, { - "name": "DELETE /api/nodes/{pk}/transports/{tid}", + "name": "DELETE /api/visors/{pk}/transports/{tid}", "request": { "method": "DELETE", "header": [], @@ -720,7 +720,7 @@ "raw": "" }, "url": { - "raw": "http://localhost:8080/api/nodes/023ab9f45c0eb3625f9848a9ff6822c6d0965a94d3e7955f1869f38153df0e5b64/transports/d5ace20e-06c8-4867-bda2-9449459a9e5a", + "raw": "http://localhost:8080/api/visors/023ab9f45c0eb3625f9848a9ff6822c6d0965a94d3e7955f1869f38153df0e5b64/transports/d5ace20e-06c8-4867-bda2-9449459a9e5a", "protocol": "http", "host": [ "localhost" @@ -728,17 +728,17 @@ "port": "8080", "path": [ "api", - "nodes", + "visors", "023ab9f45c0eb3625f9848a9ff6822c6d0965a94d3e7955f1869f38153df0e5b64", "transports", "d5ace20e-06c8-4867-bda2-9449459a9e5a" ] }, - "description": "Removes transport of given TransportID and AppNode." + "description": "Removes transport of given TransportID and AppVisor." }, "response": [ { - "name": "DELETE /api/nodes/:pk/transports/:tid", + "name": "DELETE /api/visors/:pk/transports/:tid", "originalRequest": { "method": "DELETE", "header": [], @@ -747,7 +747,7 @@ "raw": "" }, "url": { - "raw": "http://localhost:8080/api/nodes/039337a306ffbd6a7495f79b65aec91ce65756e4fd1cb2cd726840b2eee4fa59c2/transports/2ff2d608-fe14-4c17-938c-3af8afd053ae", + "raw": "http://localhost:8080/api/visors/039337a306ffbd6a7495f79b65aec91ce65756e4fd1cb2cd726840b2eee4fa59c2/transports/2ff2d608-fe14-4c17-938c-3af8afd053ae", "protocol": "http", "host": [ "localhost" @@ -755,7 +755,7 @@ "port": "8080", "path": [ "api", - "nodes", + "visors", "039337a306ffbd6a7495f79b65aec91ce65756e4fd1cb2cd726840b2eee4fa59c2", "transports", "2ff2d608-fe14-4c17-938c-3af8afd053ae" @@ -780,7 +780,7 @@ } ], "cookie": [], - "body": "{\n \"node\": \"039337a306ffbd6a7495f79b65aec91ce65756e4fd1cb2cd726840b2eee4fa59c2\",\n \"transport\": \"2ff2d608-fe14-4c17-938c-3af8afd053ae\",\n \"command\": \"RemoveTransport\",\n \"success\": true\n}" + "body": "{\n \"visor\": \"039337a306ffbd6a7495f79b65aec91ce65756e4fd1cb2cd726840b2eee4fa59c2\",\n \"transport\": \"2ff2d608-fe14-4c17-938c-3af8afd053ae\",\n \"command\": \"RemoveTransport\",\n \"success\": true\n}" } ] } diff --git a/cmd/setup-node/commands/root.go b/cmd/setup-node/commands/root.go index 5466d441b..9322141ac 100644 --- a/cmd/setup-node/commands/root.go +++ b/cmd/setup-node/commands/root.go @@ -27,8 +27,8 @@ var ( ) var rootCmd = &cobra.Command{ - Use: "setup-node [config.json]", - Short: "Route Setup Node for skywire", + Use: "setup-visor [config.json]", + Short: "Route Setup Visor for skywire", Run: func(_ *cobra.Command, args []string) { if _, err := buildinfo.Get().WriteTo(log.Writer()); err != nil { log.Printf("Failed to output build info: %v", err) @@ -68,7 +68,7 @@ var rootCmd = 
&cobra.Command{ sn, err := setup.NewNode(conf, metrics.NewPrometheus("setupnode")) if err != nil { - logger.Fatal("Failed to setup Node: ", err) + logger.Fatal("Failed to setup Visor: ", err) } go func() { @@ -85,7 +85,7 @@ var rootCmd = &cobra.Command{ func init() { rootCmd.Flags().StringVarP(&metricsAddr, "metrics", "m", ":2121", "address to bind metrics API to") rootCmd.Flags().StringVar(&syslogAddr, "syslog", "", "syslog server address. E.g. localhost:514") - rootCmd.Flags().StringVar(&tag, "tag", "setup-node", "logging tag") + rootCmd.Flags().StringVar(&tag, "tag", "setup-visor", "logging tag") rootCmd.Flags().BoolVarP(&cfgFromStdin, "stdin", "i", false, "read config from STDIN") } diff --git a/cmd/skywire-cli/commands/mdisc/root.go b/cmd/skywire-cli/commands/mdisc/root.go index 761cefba6..a3c56c980 100644 --- a/cmd/skywire-cli/commands/mdisc/root.go +++ b/cmd/skywire-cli/commands/mdisc/root.go @@ -35,13 +35,13 @@ func init() { } var entryCmd = &cobra.Command{ - Use: "entry <node-public-key>", + Use: "entry <visor-public-key>", Short: "fetches an entry from DMSG discovery", Args: cobra.MinimumNArgs(1), Run: func(_ *cobra.Command, args []string) { ctx, cancel := context.WithTimeout(context.Background(), time.Second*10) defer cancel() - pk := internal.ParsePK("node-public-key", args[0]) + pk := internal.ParsePK("visor-public-key", args[0]) entry, err := disc.NewHTTP(mdAddr).Entry(ctx, pk) internal.Catch(err) fmt.Println(entry) diff --git a/cmd/skywire-cli/commands/root.go b/cmd/skywire-cli/commands/root.go index 72a7713d2..ffa1cf72c 100644 --- a/cmd/skywire-cli/commands/root.go +++ b/cmd/skywire-cli/commands/root.go @@ -6,7 +6,7 @@ import ( "github.com/spf13/cobra" "github.com/SkycoinProject/skywire-mainnet/cmd/skywire-cli/commands/mdisc" - "github.com/SkycoinProject/skywire-mainnet/cmd/skywire-cli/commands/node" + "github.com/SkycoinProject/skywire-mainnet/cmd/skywire-cli/commands/visor" "github.com/SkycoinProject/skywire-mainnet/cmd/skywire-cli/commands/rtfind" ) @@ -17,7 +17,7 @@ var rootCmd = &cobra.Command{ func init() { rootCmd.AddCommand( - node.RootCmd, + visor.RootCmd, mdisc.RootCmd, rtfind.RootCmd, ) diff --git a/cmd/skywire-cli/commands/rtfind/root.go b/cmd/skywire-cli/commands/rtfind/root.go index 98a1ab784..f4a513cea 100644 --- a/cmd/skywire-cli/commands/rtfind/root.go +++ b/cmd/skywire-cli/commands/rtfind/root.go @@ -28,8 +28,8 @@ func init() { // RootCmd is the command that queries the route finder.
var RootCmd = &cobra.Command{ - Use: "rtfind <node-public-key-1> <node-public-key-2>", - Short: "Queries the Route Finder for available routes between two nodes", + Use: "rtfind <visor-public-key-1> <visor-public-key-2>", + Short: "Queries the Route Finder for available routes between two visors", Args: cobra.MinimumNArgs(2), Run: func(_ *cobra.Command, args []string) { rfc := rfclient.NewHTTP(frAddr, timeout) diff --git a/cmd/skywire-cli/commands/node/app.go b/cmd/skywire-cli/commands/visor/app.go similarity index 97% rename from cmd/skywire-cli/commands/node/app.go rename to cmd/skywire-cli/commands/visor/app.go index 7fdb55613..83bc7e13f 100644 --- a/cmd/skywire-cli/commands/node/app.go +++ b/cmd/skywire-cli/commands/visor/app.go @@ -1,4 +1,4 @@ -package node +package visor import ( "fmt" @@ -27,7 +27,7 @@ func init() { var lsAppsCmd = &cobra.Command{ Use: "ls-apps", - Short: "Lists apps running on the local node", + Short: "Lists apps running on the local visor", Run: func(_ *cobra.Command, _ []string) { states, err := rpcClient().Apps() internal.Catch(err) diff --git a/cmd/skywire-cli/commands/node/gen-config.go b/cmd/skywire-cli/commands/visor/gen-config.go similarity index 95% rename from cmd/skywire-cli/commands/node/gen-config.go rename to cmd/skywire-cli/commands/visor/gen-config.go index 7431eb640..b27c2b6bc 100644 --- a/cmd/skywire-cli/commands/node/gen-config.go +++ b/cmd/skywire-cli/commands/visor/gen-config.go @@ -1,4 +1,4 @@ -package node +package visor import ( "errors" @@ -40,7 +40,7 @@ var genConfigCmd = &cobra.Command{ Short: "Generates a config file", PreRun: func(_ *cobra.Command, _ []string) { if output == "" { - output = pathutil.NodeDefaults().Get(configLocType) + output = pathutil.VisorDefaults().Get(configLocType) log.Infof("No 'output' set; using default path: %s", output) } var err error @@ -83,8 +83,8 @@ func defaultConfig() *visor.Config { conf.Version = "1.0" pk, sk := cipher.GenerateKeyPair() - conf.Node.StaticPubKey = pk - conf.Node.StaticSecKey = sk + conf.Visor.StaticPubKey = pk + conf.Visor.StaticSecKey = sk lIPaddr, err := getLocalIPAddress() if err != nil { @@ -110,7 +110,7 @@ func defaultConfig() *visor.Config { defaultSkysocksConfig(""), defaultSkysocksClientConfig(), } - conf.TrustedNodes = []cipher.PubKey{} + conf.TrustedVisors = []cipher.PubKey{} if testenv { conf.Transport.Discovery = skyenv.TestTpDiscAddr @@ -129,7 +129,7 @@ func defaultConfig() *visor.Config { var sPK cipher.PubKey if err := sPK.UnmarshalText([]byte(skyenv.DefaultSetupPK)); err != nil { - log.WithError(err).Warnf("Failed to unmarshal default setup node public key %s", skyenv.DefaultSetupPK) + log.WithError(err).Warnf("Failed to unmarshal default setup visor public key %s", skyenv.DefaultSetupPK) } conf.Routing.SetupNodes = []cipher.PubKey{sPK} conf.Routing.RouteFinderTimeout = visor.Duration(10 * time.Second) diff --git a/cmd/skywire-cli/commands/node/pk.go b/cmd/skywire-cli/commands/visor/pk.go similarity index 84% rename from cmd/skywire-cli/commands/node/pk.go rename to cmd/skywire-cli/commands/visor/pk.go index f87379b9e..f8ad77114 100644 --- a/cmd/skywire-cli/commands/node/pk.go +++ b/cmd/skywire-cli/commands/visor/pk.go @@ -1,4 +1,4 @@ -package node +package visor import ( "fmt" @@ -12,7 +12,7 @@ func init() { var pkCmd = &cobra.Command{ Use: "pk", - Short: "Obtains the public key of the node", + Short: "Obtains the public key of the visor", Run: func(_ *cobra.Command, _ []string) { client := rpcClient() diff --git a/cmd/skywire-cli/commands/node/root.go b/cmd/skywire-cli/commands/visor/root.go similarity index 96% rename from
cmd/skywire-cli/commands/node/root.go rename to cmd/skywire-cli/commands/visor/root.go index 165d5d06b..b25c96579 100644 --- a/cmd/skywire-cli/commands/node/root.go +++ b/cmd/skywire-cli/commands/visor/root.go @@ -1,4 +1,4 @@ -package node +package visor import ( "net" @@ -21,7 +21,7 @@ func init() { // RootCmd contains commands that interact with the skywire-visor var RootCmd = &cobra.Command{ - Use: "node", + Use: "visor", Short: "Contains sub-commands that interact with the local Skywire Visor", } diff --git a/cmd/skywire-cli/commands/node/routes.go b/cmd/skywire-cli/commands/visor/routes.go similarity index 98% rename from cmd/skywire-cli/commands/node/routes.go rename to cmd/skywire-cli/commands/visor/routes.go index b12db1bfd..66e8a808a 100644 --- a/cmd/skywire-cli/commands/node/routes.go +++ b/cmd/skywire-cli/commands/visor/routes.go @@ -1,4 +1,4 @@ -package node +package visor import ( "errors" @@ -27,7 +27,7 @@ func init() { var lsRulesCmd = &cobra.Command{ Use: "ls-rules", - Short: "Lists the local node's routing rules", + Short: "Lists the local visor's routing rules", Run: func(_ *cobra.Command, _ []string) { rules, err := rpcClient().RoutingRules() internal.Catch(err) diff --git a/cmd/skywire-cli/commands/node/transport_discovery.go b/cmd/skywire-cli/commands/visor/transport_discovery.go similarity index 99% rename from cmd/skywire-cli/commands/node/transport_discovery.go rename to cmd/skywire-cli/commands/visor/transport_discovery.go index 9daa115eb..1bbf91a6c 100644 --- a/cmd/skywire-cli/commands/node/transport_discovery.go +++ b/cmd/skywire-cli/commands/visor/transport_discovery.go @@ -1,4 +1,4 @@ -package node +package visor import ( "errors" diff --git a/cmd/skywire-cli/commands/node/transports.go b/cmd/skywire-cli/commands/visor/transports.go similarity index 95% rename from cmd/skywire-cli/commands/node/transports.go rename to cmd/skywire-cli/commands/visor/transports.go index d6c4fc375..a02b3333e 100644 --- a/cmd/skywire-cli/commands/node/transports.go +++ b/cmd/skywire-cli/commands/visor/transports.go @@ -1,4 +1,4 @@ -package node +package visor import ( "fmt" @@ -27,7 +27,7 @@ func init() { var lsTypesCmd = &cobra.Command{ Use: "ls-types", - Short: "Lists transport types used by the local node", + Short: "Lists transport types used by the local visor", Run: func(_ *cobra.Command, _ []string) { types, err := rpcClient().TransportTypes() internal.Catch(err) @@ -45,7 +45,7 @@ var ( func init() { lsTpCmd.Flags().StringSliceVar(&filterTypes, "filter-types", filterTypes, "comma-separated; if specified, only shows transports of given types") - lsTpCmd.Flags().Var(&filterPubKeys, "filter-pks", "comma-separated; if specified, only shows transports associated with given nodes") + lsTpCmd.Flags().Var(&filterPubKeys, "filter-pks", "comma-separated; if specified, only shows transports associated with given visors") lsTpCmd.Flags().BoolVar(&showLogs, "show-logs", true, "whether to show transport logs in output") } diff --git a/cmd/skywire-visor/commands/root.go b/cmd/skywire-visor/commands/root.go index 41f1faf9c..c11d9d9b1 100644 --- a/cmd/skywire-visor/commands/root.go +++ b/cmd/skywire-visor/commands/root.go @@ -49,7 +49,7 @@ type runCfg struct { logger *logging.Logger masterLogger *logging.MasterLogger conf visor.Config - node *visor.Node + visor *visor.Visor restartCtx *restart.Context } @@ -68,9 +68,9 @@ var rootCmd = &cobra.Command{ cfg.startProfiler(). startLogger(). readConfig(). - runNode(). + runVisor(). waitOsSignals(). 
- stopNode() + stopVisor() }, Version: buildinfo.Get().Version, } @@ -147,7 +147,7 @@ func (cfg *runCfg) readConfig() *runCfg { var rdr io.Reader if !cfg.cfgFromStdin { - configPath := pathutil.FindConfigPath(cfg.args, 0, configEnv, pathutil.NodeDefaults()) + configPath := pathutil.FindConfigPath(cfg.args, 0, configEnv, pathutil.VisorDefaults()) file, err := os.Open(filepath.Clean(configPath)) if err != nil { @@ -179,7 +179,7 @@ func (cfg *runCfg) readConfig() *runCfg { return cfg } -func (cfg *runCfg) runNode() *runCfg { +func (cfg *runCfg) runVisor() *runCfg { startDelay, err := time.ParseDuration(cfg.startDelay) if err != nil { cfg.logger.Warnf("Using no visor start delay due to parsing failure: %v", err) @@ -203,13 +203,13 @@ func (cfg *runCfg) runNode() *runCfg { cfg.logger.Fatal("failed to unlink socket files: ", err) } - node, err := visor.NewNode(&cfg.conf, cfg.masterLogger, cfg.restartCtx, cfg.configPath) + visor, err := visor.NewVisor(&cfg.conf, cfg.masterLogger, cfg.restartCtx, cfg.configPath) if err != nil { - cfg.logger.Fatal("Failed to initialize node: ", err) + cfg.logger.Fatal("Failed to initialize visor: ", err) } if cfg.conf.Uptime.Tracker != "" { - uptimeTracker, err := utclient.NewHTTP(cfg.conf.Uptime.Tracker, cfg.conf.Node.StaticPubKey, cfg.conf.Node.StaticSecKey) + uptimeTracker, err := utclient.NewHTTP(cfg.conf.Uptime.Tracker, cfg.conf.Visor.StaticPubKey, cfg.conf.Visor.StaticSecKey) if err != nil { cfg.logger.Error("Failed to connect to uptime tracker: ", err) } else { @@ -218,8 +218,8 @@ func (cfg *runCfg) runNode() *runCfg { go func() { for range ticker.C { ctx := context.Background() - if err := uptimeTracker.UpdateNodeUptime(ctx); err != nil { - cfg.logger.Error("Failed to update node uptime: ", err) + if err := uptimeTracker.UpdateVisorUptime(ctx); err != nil { + cfg.logger.Error("Failed to update visor uptime: ", err) } } }() @@ -227,8 +227,8 @@ func (cfg *runCfg) runNode() *runCfg { } go func() { - if err := node.Start(); err != nil { - cfg.logger.Fatal("Failed to start node: ", err) + if err := visor.Start(); err != nil { + cfg.logger.Fatal("Failed to start visor: ", err) } }() @@ -236,17 +236,17 @@ func (cfg *runCfg) runNode() *runCfg { cfg.conf.ShutdownTimeout = defaultShutdownTimeout } - cfg.node = node + cfg.visor = visor return cfg } -func (cfg *runCfg) stopNode() *runCfg { +func (cfg *runCfg) stopVisor() *runCfg { defer cfg.profileStop() - if err := cfg.node.Close(); err != nil { + if err := cfg.visor.Close(); err != nil { if !strings.Contains(err.Error(), "closed") { - cfg.logger.Fatal("Failed to close node: ", err) + cfg.logger.Fatal("Failed to close visor: ", err) } } diff --git a/cmd/skywire-visor/config.json b/cmd/skywire-visor/config.json index 864c31925..fb08e550b 100644 --- a/cmd/skywire-visor/config.json +++ b/cmd/skywire-visor/config.json @@ -1,6 +1,6 @@ { "version": "1.0", - "node": { + "visor": { "static_public_key": "024ec47420176680816e0406250e7156465e4531f5b26057c9f6297bb0303558c7", "static_secret_key": "42bca4df2f3189b28872d40e6c61aacd5e85b8e91f8fea65780af27c142419e5" }, diff --git a/docker/images/node/Dockerfile b/docker/images/visor/Dockerfile similarity index 89% rename from docker/images/node/Dockerfile rename to docker/images/visor/Dockerfile index 0e426d476..fa4cea791 100644 --- a/docker/images/node/Dockerfile +++ b/docker/images/visor/Dockerfile @@ -23,11 +23,11 @@ RUN go build -mod=vendor -tags netgo -ldflags="-w -s" \ ## Resulting image -FROM ${base} as node-runner +FROM ${base} as visor-runner COPY --from=builder 
/go/skywire/skywire-visor skywire-visor COPY --from=builder /go/skywire/apps bin/apps -COPY --from=builder /go/skywire/docker/images/node/update.sh update.sh +COPY --from=builder /go/skywire/docker/images/visor/update.sh update.sh COPY --from=builder /go/skywire/skywire-cli bin/skywire-cli RUN ./update.sh @@ -35,4 +35,4 @@ RUN ./update.sh ENTRYPOINT [ "./skywire-visor" ] # default target -FROM node-runner +FROM visor-runner diff --git a/docker/images/node/update.sh b/docker/images/visor/update.sh similarity index 100% rename from docker/images/node/update.sh rename to docker/images/visor/update.sh diff --git a/integration/start-restart-nodeB.sh b/integration/start-restart-nodeB.sh index 4b6c20f39..b87ec1a76 100644 --- a/integration/start-restart-nodeB.sh +++ b/integration/start-restart-nodeB.sh @@ -5,7 +5,7 @@ echo Press Ctrl-C to exit for ((;;)) do ./bin/skywire-visor ./integration/intermediary-nodeB.json --tag NodeB 2>> ./logs/nodeB.log >> ./logs/nodeB.log & - echo node starting NodeB + echo starting visor NodeB sleep 25 echo Killing NodeB on $(ps aux |grep "[N]odeB" |awk '{print $2}') kill $(ps aux |grep "[N]odeB" |awk '{print $2}') diff --git a/integration/startup.sh b/integration/startup.sh index 1ca41aa4c..caf69b1e7 100644 --- a/integration/startup.sh +++ b/integration/startup.sh @@ -4,12 +4,12 @@ # - inside tmux session created by run-*-env.sh scripts # - or standalone `source ./integration/[name of environment]/env-vars.sh && ./integration/startup.sh` -./skywire-cli --rpc $RPC_A node add-tp $PK_B -./skywire-cli --rpc $RPC_C node add-tp $PK_B +./skywire-cli --rpc $RPC_A visor add-tp $PK_B +./skywire-cli --rpc $RPC_C visor add-tp $PK_B sleep 1 echo "NodeA Transports:" -./skywire-cli --rpc $RPC_A node ls-tp +./skywire-cli --rpc $RPC_A visor ls-tp echo "NodeB Transports:" -./skywire-cli --rpc $RPC_B node ls-tp +./skywire-cli --rpc $RPC_B visor ls-tp diff --git a/internal/skyenv/const.go b/internal/skyenv/const.go index 597a593ba..47389a211 100644 --- a/internal/skyenv/const.go +++ b/internal/skyenv/const.go @@ -22,9 +22,9 @@ const ( // Default dmsg ports. const ( - DmsgSetupPort = uint16(36) // Listening port of a setup node. - DmsgAwaitSetupPort = uint16(136) // Listening port of a visor node for setup operations. - DmsgTransportPort = uint16(45) // Listening port of a visor node for incoming transports. + DmsgSetupPort = uint16(36) // Listening port of a setup visor. + DmsgAwaitSetupPort = uint16(136) // Listening port of a visor for setup operations. + DmsgTransportPort = uint16(45) // Listening port of a visor for incoming transports. ) // Default dmsgpty constants. diff --git a/internal/utclient/client.go b/internal/utclient/client.go index 63d54e467..16af06151 100644 --- a/internal/utclient/client.go +++ b/internal/utclient/client.go @@ -26,7 +26,7 @@ type Error struct { // APIClient implements DMSG discovery API client. type APIClient interface { - UpdateNodeUptime(context.Context) error + UpdateVisorUptime(context.Context) error } // httpClient implements Client for uptime tracker API. @@ -61,8 +61,8 @@ func (c *httpClient) Get(ctx context.Context, path string) (*http.Response, erro return c.client.Do(req.WithContext(ctx)) } -// UpdateNodeUptime updates node uptime. -func (c *httpClient) UpdateNodeUptime(ctx context.Context) error { +// UpdateVisorUptime updates visor uptime.
+func (c *httpClient) UpdateVisorUptime(ctx context.Context) error { resp, err := c.Get(ctx, "/update") if resp != nil { defer func() { diff --git a/internal/utclient/client_test.go b/internal/utclient/client_test.go index 2ffe44485..d99903723 100644 --- a/internal/utclient/client_test.go +++ b/internal/utclient/client_test.go @@ -67,7 +67,7 @@ func TestUpdateNodeUptime(t *testing.T) { c, err := NewHTTP(srv.URL, testPubKey, testSecKey) require.NoError(t, err) - err = c.UpdateNodeUptime(context.TODO()) + err = c.UpdateVisorUptime(context.TODO()) require.NoError(t, err) assert.Equal(t, "/update", <-urlCh) diff --git a/pkg/app/appcommon/env.go b/pkg/app/appcommon/env.go index 0f934aaa9..2c4410775 100644 --- a/pkg/app/appcommon/env.go +++ b/pkg/app/appcommon/env.go @@ -5,6 +5,6 @@ const ( EnvAppKey = "APP_KEY" // EnvSockFile is a name for env arg containing unix socket file name. EnvSockFile = "SW_UNIX" - // EnvVisorPK is a name for env arg containing public key of visor node. + // EnvVisorPK is a name for env arg containing public key of the visor. EnvVisorPK = "VISOR_PK" ) diff --git a/pkg/app/appcommon/procid.go b/pkg/app/appcommon/procid.go index 166059f0e..8ae3bc0bf 100644 --- a/pkg/app/appcommon/procid.go +++ b/pkg/app/appcommon/procid.go @@ -1,6 +1,6 @@ package appcommon // ProcID identifies the current instance of an app (an app process). -// The visor node is responsible for starting apps, and the started process +// The visor is responsible for starting apps, and the started process // should be provided with a ProcID. type ProcID uint16 diff --git a/pkg/app/appnet/addr.go b/pkg/app/appnet/addr.go index 24b7c9742..040554741 100644 --- a/pkg/app/appnet/addr.go +++ b/pkg/app/appnet/addr.go @@ -29,7 +29,7 @@ func (a Addr) Network() string { return string(a.Net) } -// String returns public key and port of node split by colon. +// String returns public key and port of visor split by colon. func (a Addr) String() string { if a.Port == 0 { return fmt.Sprintf("%s:~", a.PubKey) diff --git a/pkg/app/client.go b/pkg/app/client.go index 1b6e319a6..50cbc3f04 100644 --- a/pkg/app/client.go +++ b/pkg/app/client.go @@ -90,7 +90,7 @@ func NewClient(log *logging.Logger, config ClientConfig) (*Client, error) { }, nil } -// Dial dials the remote node using `remote`. +// Dial dials the remote visor using `remote`. func (c *Client) Dial(remote appnet.Addr) (net.Conn, error) { connID, localPort, err := c.rpc.Dial(remote) if err != nil { diff --git a/pkg/app/doc.go b/pkg/app/doc.go index 678549105..297f2a0c1 100644 --- a/pkg/app/doc.go +++ b/pkg/app/doc.go @@ -1,4 +1,4 @@ // Package app provides facilities to establish communication -// between a visor node and a skywire application. Intended to +// between a visor and a skywire application. Intended to // replace the original `app` module. package app diff --git a/pkg/hypervisor/hypervisor.go b/pkg/hypervisor/hypervisor.go index 71901f80e..a9368df8a 100644 --- a/pkg/hypervisor/hypervisor.go +++ b/pkg/hypervisor/hypervisor.go @@ -1,4 +1,4 @@ -// Package hypervisor implements management node +// Package hypervisor implements visor management package hypervisor import ( @@ -41,21 +41,21 @@ var ( log = logging.MustGetLogger("hypervisor") // nolint: gochecknoglobals ) -type appNodeConn struct { +type appVisorConn struct { Addr dmsg.Addr Client visor.RPCClient } -// Node manages AppNodes. -type Node struct { - c Config - nodes map[cipher.PubKey]appNodeConn // connected remote nodes.
- users *UserManager - mu *sync.RWMutex +// Visor manages AppVisors. +type Visor struct { + c Config + visors map[cipher.PubKey]appVisorConn // connected remote visors. + users *UserManager + mu *sync.RWMutex } -// NewNode creates a new Node. -func NewNode(config Config) (*Node, error) { +// NewVisor creates a new Visor. +func NewVisor(config Config) (*Visor, error) { boltUserDB, err := NewBoltUserStore(config.DBPath) if err != nil { return nil, err @@ -63,16 +63,16 @@ func NewNode(config Config) (*Node, error) { singleUserDB := NewSingleUserStore("admin", boltUserDB) - return &Node{ - c: config, - nodes: make(map[cipher.PubKey]appNodeConn), - users: NewUserManager(singleUserDB, config.Cookies), - mu: new(sync.RWMutex), + return &Visor{ + c: config, + visors: make(map[cipher.PubKey]appVisorConn), + users: NewUserManager(singleUserDB, config.Cookies), + mu: new(sync.RWMutex), }, nil } -// ServeRPC serves RPC of a Node. -func (m *Node) ServeRPC(lis *dmsg.Listener) error { +// ServeRPC serves RPC of a Visor. +func (m *Visor) ServeRPC(lis *dmsg.Listener) error { for { conn, err := lis.Accept() if err != nil { @@ -82,7 +82,7 @@ func (m *Node) ServeRPC(lis *dmsg.Listener) error { addr := conn.RemoteAddr().(dmsg.Addr) log.Infoln("accepted: ", addr.PK) m.mu.Lock() - m.nodes[addr.PK] = appNodeConn{ + m.visors[addr.PK] = appVisorConn{ Addr: addr, Client: visor.NewRPCClient(rpc.NewClient(conn), visor.RPCPrefix), } @@ -92,24 +92,24 @@ func (m *Node) ServeRPC(lis *dmsg.Listener) error { // MockConfig configures how mock data is to be added. type MockConfig struct { - Nodes int - MaxTpsPerNode int - MaxRoutesPerNode int - EnableAuth bool + Visors int + MaxTpsPerVisor int + MaxRoutesPerVisor int + EnableAuth bool } -// AddMockData adds mock data to Node. -func (m *Node) AddMockData(config MockConfig) error { +// AddMockData adds mock data to Visor.
+func (m *Visor) AddMockData(config MockConfig) error { r := rand.New(rand.NewSource(time.Now().UnixNano())) - for i := 0; i < config.Nodes; i++ { - pk, client, err := visor.NewMockRPCClient(r, config.MaxTpsPerNode, config.MaxRoutesPerNode) + for i := 0; i < config.Visors; i++ { + pk, client, err := visor.NewMockRPCClient(r, config.MaxTpsPerVisor, config.MaxRoutesPerVisor) if err != nil { return err } m.mu.Lock() - m.nodes[pk] = appNodeConn{ + m.visors[pk] = appVisorConn{ Addr: dmsg.Addr{ PK: pk, Port: uint16(i), @@ -125,7 +125,7 @@ func (m *Node) AddMockData(config MockConfig) error { } // ServeHTTP implements http.Handler -func (m *Node) ServeHTTP(w http.ResponseWriter, req *http.Request) { +func (m *Visor) ServeHTTP(w http.ResponseWriter, req *http.Request) { r := chi.NewRouter() r.Use(middleware.Timeout(httpTimeout)) @@ -147,42 +147,42 @@ func (m *Node) ServeHTTP(w http.ResponseWriter, req *http.Request) { r.Get("/user", m.users.UserInfo()) r.Post("/change-password", m.users.ChangePassword()) - r.Get("/nodes", m.getNodes()) - r.Get("/nodes/{pk}", m.getNode()) - r.Get("/nodes/{pk}/health", m.getHealth()) - r.Get("/nodes/{pk}/uptime", m.getUptime()) - r.Get("/nodes/{pk}/apps", m.getApps()) - r.Get("/nodes/{pk}/apps/{app}", m.getApp()) - r.Put("/nodes/{pk}/apps/{app}", m.putApp()) - r.Get("/nodes/{pk}/apps/{app}/logs", m.appLogsSince()) - r.Get("/nodes/{pk}/transport-types", m.getTransportTypes()) - r.Get("/nodes/{pk}/transports", m.getTransports()) - r.Post("/nodes/{pk}/transports", m.postTransport()) - r.Get("/nodes/{pk}/transports/{tid}", m.getTransport()) - r.Delete("/nodes/{pk}/transports/{tid}", m.deleteTransport()) - r.Get("/nodes/{pk}/routes", m.getRoutes()) - r.Post("/nodes/{pk}/routes", m.postRoute()) - r.Get("/nodes/{pk}/routes/{rid}", m.getRoute()) - r.Put("/nodes/{pk}/routes/{rid}", m.putRoute()) - r.Delete("/nodes/{pk}/routes/{rid}", m.deleteRoute()) - r.Get("/nodes/{pk}/loops", m.getLoops()) - r.Get("/nodes/{pk}/restart", m.restart()) - r.Post("/nodes/{pk}/exec", m.exec()) + r.Get("/visors", m.getVisors()) + r.Get("/visors/{pk}/health", m.getHealth()) + r.Get("/visors/{pk}/uptime", m.getUptime()) + r.Get("/visors/{pk}", m.getVisor()) + r.Get("/visors/{pk}/apps", m.getApps()) + r.Get("/visors/{pk}/apps/{app}", m.getApp()) + r.Put("/visors/{pk}/apps/{app}", m.putApp()) + r.Get("/visors/{pk}/apps/{app}/logs", m.appLogsSince()) + r.Get("/visors/{pk}/transport-types", m.getTransportTypes()) + r.Get("/visors/{pk}/transports", m.getTransports()) + r.Post("/visors/{pk}/transports", m.postTransport()) + r.Get("/visors/{pk}/transports/{tid}", m.getTransport()) + r.Delete("/visors/{pk}/transports/{tid}", m.deleteTransport()) + r.Get("/visors/{pk}/routes", m.getRoutes()) + r.Post("/visors/{pk}/routes", m.postRoute()) + r.Get("/visors/{pk}/routes/{rid}", m.getRoute()) + r.Put("/visors/{pk}/routes/{rid}", m.putRoute()) + r.Delete("/visors/{pk}/routes/{rid}", m.deleteRoute()) + r.Get("/visors/{pk}/loops", m.getLoops()) + r.Get("/visors/{pk}/restart", m.restart()) + r.Post("/visors/{pk}/exec", m.exec()) }) }) r.ServeHTTP(w, req) } -// VisorHealth represents a node's health report attached to hypervisor to visor request status +// VisorHealth represents a visor's health report attached to hypervisor to visor request status type VisorHealth struct { Status int `json:"status"` *visor.HealthInfo } // provides summary of health information for every visor -func (m *Node) getHealth() http.HandlerFunc { - return m.withCtx(m.nodeCtx, func(w http.ResponseWriter, r *http.Request, ctx *httpCtx) { 
+func (m *Visor) getHealth() http.HandlerFunc { + return m.withCtx(m.visorCtx, func(w http.ResponseWriter, r *http.Request, ctx *httpCtx) { vh := &VisorHealth{} type healthRes struct { @@ -214,9 +214,9 @@ func (m *Node) getHealth() http.HandlerFunc { }) } -// getUptime gets given node's uptime -func (m *Node) getUptime() http.HandlerFunc { - return m.withCtx(m.nodeCtx, func(w http.ResponseWriter, r *http.Request, ctx *httpCtx) { +// getUptime gets given visor's uptime +func (m *Visor) getUptime() http.HandlerFunc { + return m.withCtx(m.visorCtx, func(w http.ResponseWriter, r *http.Request, ctx *httpCtx) { u, err := ctx.RPC.Uptime() if err != nil { httputil.WriteJSON(w, r, http.StatusInternalServerError, err) @@ -228,8 +228,8 @@ func (m *Node) getUptime() http.HandlerFunc { } // executes a command and returns its output -func (m *Node) exec() http.HandlerFunc { - return m.withCtx(m.nodeCtx, func(w http.ResponseWriter, r *http.Request, ctx *httpCtx) { +func (m *Visor) exec() http.HandlerFunc { + return m.withCtx(m.visorCtx, func(w http.ResponseWriter, r *http.Request, ctx *httpCtx) { var reqBody struct { Command string `json:"command"` } @@ -264,16 +264,16 @@ type summaryResp struct { *visor.Summary } -// provides summary of all nodes. -func (m *Node) getNodes() http.HandlerFunc { +// provides summary of all visors. +func (m *Visor) getVisors() http.HandlerFunc { return func(w http.ResponseWriter, r *http.Request) { var summaries []summaryResp m.mu.RLock() - for pk, c := range m.nodes { + for pk, c := range m.visors { summary, err := c.Client.Summary() if err != nil { - log.Errorf("failed to obtain summary from AppNode with pk %s. Error: %v", pk, err) + log.Errorf("failed to obtain summary from AppVisor with pk %s. Error: %v", pk, err) summary = &visor.Summary{PubKey: pk} } @@ -290,9 +290,9 @@ func (m *Node) getNodes() http.HandlerFunc { } } -// provides summary of single node. -func (m *Node) getNode() http.HandlerFunc { - return m.withCtx(m.nodeCtx, func(w http.ResponseWriter, r *http.Request, ctx *httpCtx) { +// provides summary of single visor. 
+func (m *Visor) getVisor() http.HandlerFunc { + return m.withCtx(m.visorCtx, func(w http.ResponseWriter, r *http.Request, ctx *httpCtx) { summary, err := ctx.RPC.Summary() if err != nil { httputil.WriteJSON(w, r, http.StatusInternalServerError, err) @@ -306,9 +306,9 @@ func (m *Node) getNode() http.HandlerFunc { }) } -// returns app summaries of a given node of pk -func (m *Node) getApps() http.HandlerFunc { - return m.withCtx(m.nodeCtx, func(w http.ResponseWriter, r *http.Request, ctx *httpCtx) { +// returns app summaries of a given visor of pk +func (m *Visor) getApps() http.HandlerFunc { + return m.withCtx(m.visorCtx, func(w http.ResponseWriter, r *http.Request, ctx *httpCtx) { apps, err := ctx.RPC.Apps() if err != nil { httputil.WriteJSON(w, r, http.StatusInternalServerError, err) @@ -319,8 +319,8 @@ func (m *Node) getApps() http.HandlerFunc { }) } -// returns an app summary of a given node's pk and app name -func (m *Node) getApp() http.HandlerFunc { +// returns an app summary of a given visor's pk and app name +func (m *Visor) getApp() http.HandlerFunc { return m.withCtx(m.appCtx, func(w http.ResponseWriter, r *http.Request, ctx *httpCtx) { httputil.WriteJSON(w, r, http.StatusOK, ctx.App) }) @@ -328,7 +328,7 @@ func (m *Node) getApp() http.HandlerFunc { // TODO: simplify // nolint: funlen,gocognit,godox -func (m *Node) putApp() http.HandlerFunc { +func (m *Visor) putApp() http.HandlerFunc { return m.withCtx(m.appCtx, func(w http.ResponseWriter, r *http.Request, ctx *httpCtx) { var reqBody struct { AutoStart *bool `json:"autostart,omitempty"` @@ -404,7 +404,7 @@ type LogsRes struct { Logs []string `json:"logs"` } -func (m *Node) appLogsSince() http.HandlerFunc { +func (m *Visor) appLogsSince() http.HandlerFunc { return m.withCtx(m.appCtx, func(w http.ResponseWriter, r *http.Request, ctx *httpCtx) { since := r.URL.Query().Get("since") since = strings.Replace(since, " ", "+", 1) // we need to put '+' again that was replaced in the query string @@ -433,8 +433,8 @@ func (m *Node) appLogsSince() http.HandlerFunc { }) } -func (m *Node) getTransportTypes() http.HandlerFunc { - return m.withCtx(m.nodeCtx, func(w http.ResponseWriter, r *http.Request, ctx *httpCtx) { +func (m *Visor) getTransportTypes() http.HandlerFunc { + return m.withCtx(m.visorCtx, func(w http.ResponseWriter, r *http.Request, ctx *httpCtx) { types, err := ctx.RPC.TransportTypes() if err != nil { httputil.WriteJSON(w, r, http.StatusInternalServerError, err) @@ -445,8 +445,8 @@ func (m *Node) getTransportTypes() http.HandlerFunc { }) } -func (m *Node) getTransports() http.HandlerFunc { - return m.withCtx(m.nodeCtx, func(w http.ResponseWriter, r *http.Request, ctx *httpCtx) { +func (m *Visor) getTransports() http.HandlerFunc { + return m.withCtx(m.visorCtx, func(w http.ResponseWriter, r *http.Request, ctx *httpCtx) { qTypes := strSliceFromQuery(r, "type", nil) qPKs, err := pkSliceFromQuery(r, "pk", nil) @@ -470,8 +470,8 @@ func (m *Node) getTransports() http.HandlerFunc { }) } -func (m *Node) postTransport() http.HandlerFunc { - return m.withCtx(m.nodeCtx, func(w http.ResponseWriter, r *http.Request, ctx *httpCtx) { +func (m *Visor) postTransport() http.HandlerFunc { + return m.withCtx(m.visorCtx, func(w http.ResponseWriter, r *http.Request, ctx *httpCtx) { var reqBody struct { TpType string `json:"transport_type"` Remote cipher.PubKey `json:"remote_pk"` @@ -499,13 +499,13 @@ func (m *Node) postTransport() http.HandlerFunc { }) } -func (m *Node) getTransport() http.HandlerFunc { +func (m *Visor) getTransport() 
http.HandlerFunc { return m.withCtx(m.tpCtx, func(w http.ResponseWriter, r *http.Request, ctx *httpCtx) { httputil.WriteJSON(w, r, http.StatusOK, ctx.Tp) }) } -func (m *Node) deleteTransport() http.HandlerFunc { +func (m *Visor) deleteTransport() http.HandlerFunc { return m.withCtx(m.tpCtx, func(w http.ResponseWriter, r *http.Request, ctx *httpCtx) { if err := ctx.RPC.RemoveTransport(ctx.Tp.ID); err != nil { httputil.WriteJSON(w, r, http.StatusInternalServerError, err) @@ -535,8 +535,8 @@ func makeRoutingRuleResp(key routing.RouteID, rule routing.Rule, summary bool) r return resp } -func (m *Node) getRoutes() http.HandlerFunc { - return m.withCtx(m.nodeCtx, func(w http.ResponseWriter, r *http.Request, ctx *httpCtx) { +func (m *Visor) getRoutes() http.HandlerFunc { + return m.withCtx(m.visorCtx, func(w http.ResponseWriter, r *http.Request, ctx *httpCtx) { qSummary, err := httputil.BoolFromQuery(r, "summary", false) if err != nil { httputil.WriteJSON(w, r, http.StatusBadRequest, err) @@ -558,8 +558,8 @@ func (m *Node) getRoutes() http.HandlerFunc { }) } -func (m *Node) postRoute() http.HandlerFunc { - return m.withCtx(m.nodeCtx, func(w http.ResponseWriter, r *http.Request, ctx *httpCtx) { +func (m *Visor) postRoute() http.HandlerFunc { + return m.withCtx(m.visorCtx, func(w http.ResponseWriter, r *http.Request, ctx *httpCtx) { var summary routing.RuleSummary if err := httputil.ReadJSON(r, &summary); err != nil { if err != io.EOF { @@ -586,7 +586,7 @@ func (m *Node) postRoute() http.HandlerFunc { }) } -func (m *Node) getRoute() http.HandlerFunc { +func (m *Visor) getRoute() http.HandlerFunc { return m.withCtx(m.routeCtx, func(w http.ResponseWriter, r *http.Request, ctx *httpCtx) { qSummary, err := httputil.BoolFromQuery(r, "summary", true) if err != nil { @@ -604,7 +604,7 @@ func (m *Node) getRoute() http.HandlerFunc { }) } -func (m *Node) putRoute() http.HandlerFunc { +func (m *Visor) putRoute() http.HandlerFunc { return m.withCtx(m.routeCtx, func(w http.ResponseWriter, r *http.Request, ctx *httpCtx) { var summary routing.RuleSummary if err := httputil.ReadJSON(r, &summary); err != nil { @@ -632,7 +632,7 @@ func (m *Node) putRoute() http.HandlerFunc { }) } -func (m *Node) deleteRoute() http.HandlerFunc { +func (m *Visor) deleteRoute() http.HandlerFunc { return m.withCtx(m.routeCtx, func(w http.ResponseWriter, r *http.Request, ctx *httpCtx) { if err := ctx.RPC.RemoveRoutingRule(ctx.RtKey); err != nil { httputil.WriteJSON(w, r, http.StatusNotFound, err) @@ -659,8 +659,8 @@ func makeLoopResp(info visor.LoopInfo) loopResp { } } -func (m *Node) getLoops() http.HandlerFunc { - return m.withCtx(m.nodeCtx, func(w http.ResponseWriter, r *http.Request, ctx *httpCtx) { +func (m *Visor) getLoops() http.HandlerFunc { + return m.withCtx(m.visorCtx, func(w http.ResponseWriter, r *http.Request, ctx *httpCtx) { loops, err := ctx.RPC.Loops() if err != nil { httputil.WriteJSON(w, r, http.StatusInternalServerError, err) @@ -677,8 +677,8 @@ func (m *Node) getLoops() http.HandlerFunc { } // NOTE: Reply comes with a delay, because of check if new executable is started successfully. 
-func (m *Node) restart() http.HandlerFunc { - return m.withCtx(m.nodeCtx, func(w http.ResponseWriter, r *http.Request, ctx *httpCtx) { +func (m *Visor) restart() http.HandlerFunc { + return m.withCtx(m.visorCtx, func(w http.ResponseWriter, r *http.Request, ctx *httpCtx) { if err := ctx.RPC.Restart(); err != nil { httputil.WriteJSON(w, r, http.StatusInternalServerError, err) return @@ -692,16 +692,16 @@ func (m *Node) restart() http.HandlerFunc { <<< Helper functions >>> */ -func (m *Node) client(pk cipher.PubKey) (dmsg.Addr, visor.RPCClient, bool) { +func (m *Visor) client(pk cipher.PubKey) (dmsg.Addr, visor.RPCClient, bool) { m.mu.RLock() - conn, ok := m.nodes[pk] + conn, ok := m.visors[pk] m.mu.RUnlock() return conn.Addr, conn.Client, ok } type httpCtx struct { - // Node + // Visor PK cipher.PubKey Addr dmsg.Addr RPC visor.RPCClient @@ -721,7 +721,7 @@ type ( handlerFunc func(w http.ResponseWriter, r *http.Request, ctx *httpCtx) ) -func (m *Node) withCtx(vFunc valuesFunc, hFunc handlerFunc) http.HandlerFunc { +func (m *Visor) withCtx(vFunc valuesFunc, hFunc handlerFunc) http.HandlerFunc { return func(w http.ResponseWriter, r *http.Request) { if rv, ok := vFunc(w, r); ok { hFunc(w, r, rv) @@ -729,7 +729,7 @@ func (m *Node) withCtx(vFunc valuesFunc, hFunc handlerFunc) http.HandlerFunc { } } -func (m *Node) nodeCtx(w http.ResponseWriter, r *http.Request) (*httpCtx, bool) { +func (m *Visor) visorCtx(w http.ResponseWriter, r *http.Request) (*httpCtx, bool) { pk, err := pkFromParam(r, "pk") if err != nil { httputil.WriteJSON(w, r, http.StatusBadRequest, err) @@ -738,7 +738,7 @@ func (m *Node) nodeCtx(w http.ResponseWriter, r *http.Request) (*httpCtx, bool) addr, client, ok := m.client(pk) if !ok { - httputil.WriteJSON(w, r, http.StatusNotFound, fmt.Errorf("node of pk '%s' not found", pk)) + httputil.WriteJSON(w, r, http.StatusNotFound, fmt.Errorf("visor of pk '%s' not found", pk)) return nil, false } @@ -749,8 +749,8 @@ func (m *Node) nodeCtx(w http.ResponseWriter, r *http.Request) (*httpCtx, bool) }, true } -func (m *Node) appCtx(w http.ResponseWriter, r *http.Request) (*httpCtx, bool) { - ctx, ok := m.nodeCtx(w, r) +func (m *Visor) appCtx(w http.ResponseWriter, r *http.Request) (*httpCtx, bool) { + ctx, ok := m.visorCtx(w, r) if !ok { return nil, false } @@ -770,14 +770,14 @@ func (m *Node) appCtx(w http.ResponseWriter, r *http.Request) (*httpCtx, bool) { } } - errMsg := fmt.Errorf("can not find app of name %s from node %s", appName, ctx.PK) + errMsg := fmt.Errorf("cannot find app of name %s from visor %s", appName, ctx.PK) httputil.WriteJSON(w, r, http.StatusNotFound, errMsg) return nil, false } -func (m *Node) tpCtx(w http.ResponseWriter, r *http.Request) (*httpCtx, bool) { - ctx, ok := m.nodeCtx(w, r) +func (m *Visor) tpCtx(w http.ResponseWriter, r *http.Request) (*httpCtx, bool) { + ctx, ok := m.visorCtx(w, r) if !ok { return nil, false } @@ -807,8 +807,8 @@ func (m *Node) tpCtx(w http.ResponseWriter, r *http.Request) (*httpCtx, bool) { return ctx, true } -func (m *Node) routeCtx(w http.ResponseWriter, r *http.Request) (*httpCtx, bool) { - ctx, ok := m.nodeCtx(w, r) +func (m *Visor) routeCtx(w http.ResponseWriter, r *http.Request) (*httpCtx, bool) { - ctx, ok := m.nodeCtx(w, r) + ctx, ok := m.visorCtx(w, r) if !ok { return nil, false } diff --git a/pkg/hypervisor/hypervisor_test.go b/pkg/hypervisor/hypervisor_test.go index 0d112ff61..bff85c593 100644 --- a/pkg/hypervisor/hypervisor_test.go +++ b/pkg/hypervisor/hypervisor_test.go @@ -42,7 +42,7 @@ const ( badCreateAccountPayload = 
`{"username":"invalid_user","password":"Secure1234!"}` ) -func TestNewNode(t *testing.T) { +func TestNewVisor(t *testing.T) { config := makeConfig(false) confDir, err := ioutil.TempDir(os.TempDir(), "SWHV") @@ -51,45 +51,45 @@ func TestNewNode(t *testing.T) { config.DBPath = filepath.Join(confDir, "users.db") t.Run("no_access_without_login", func(t *testing.T) { - testNodeNoAccessWithoutLogin(t, config) + testVisorNoAccessWithoutLogin(t, config) }) t.Run("only_admin_account_allowed", func(t *testing.T) { - testNodeOnlyAdminAccountAllowed(t, config) + testVisorOnlyAdminAccountAllowed(t, config) }) t.Run("cannot_login_twice", func(t *testing.T) { - testNodeCannotLoginTwice(t, config) + testVisorCannotLoginTwice(t, config) }) t.Run("access_after_login", func(t *testing.T) { - testNodeAccessAfterLogin(t, config) + testVisorAccessAfterLogin(t, config) }) t.Run("no_access_after_logout", func(t *testing.T) { - testNodeNoAccessAfterLogout(t, config) + testVisorNoAccessAfterLogout(t, config) }) t.Run("change_password", func(t *testing.T) { - testNodeChangePassword(t, config) + testVisorChangePassword(t, config) }) } -func makeStartNode(t *testing.T, config Config) (string, *http.Client, func()) { +func makeStartVisor(t *testing.T, config Config) (string, *http.Client, func()) { // nolint: gomnd defaultMockConfig := MockConfig{ - Nodes: 5, - MaxTpsPerNode: 10, - MaxRoutesPerNode: 10, - EnableAuth: true, + Visors: 5, + MaxTpsPerVisor: 10, + MaxRoutesPerVisor: 10, + EnableAuth: true, } - node, err := NewNode(config) + visor, err := NewVisor(config) require.NoError(t, err) - require.NoError(t, node.AddMockData(defaultMockConfig)) + require.NoError(t, visor.AddMockData(defaultMockConfig)) - srv := httptest.NewTLSServer(node) - node.c.Cookies.Domain = srv.Listener.Addr().String() + srv := httptest.NewTLSServer(visor) + visor.c.Cookies.Domain = srv.Listener.Addr().String() client := srv.Client() jar, err := cookiejar.New(&cookiejar.Options{}) @@ -143,8 +143,8 @@ func testCase(t *testing.T, addr string, client *http.Client, tc TestCase, testT } } -func testNodeNoAccessWithoutLogin(t *testing.T, config Config) { - addr, client, stop := makeStartNode(t, config) +func testVisorNoAccessWithoutLogin(t *testing.T, config Config) { + addr, client, stop := makeStartVisor(t, config) defer stop() makeCase := func(method string, uri string, body io.Reader) TestCase { @@ -164,12 +164,12 @@ func testNodeNoAccessWithoutLogin(t *testing.T, config Config) { testCases(t, addr, client, []TestCase{ makeCase(http.MethodGet, "/api/user", nil), makeCase(http.MethodPost, "/api/change-password", strings.NewReader(`{"old_password":"old","new_password":"new"}`)), - makeCase(http.MethodGet, "/api/nodes", nil), + makeCase(http.MethodGet, "/api/visors", nil), }) } -func testNodeOnlyAdminAccountAllowed(t *testing.T, config Config) { - addr, client, stop := makeStartNode(t, config) +func testVisorOnlyAdminAccountAllowed(t *testing.T, config Config) { + addr, client, stop := makeStartVisor(t, config) defer stop() testCases(t, addr, client, []TestCase{ @@ -198,8 +198,8 @@ func testNodeOnlyAdminAccountAllowed(t *testing.T, config Config) { }) } -func testNodeCannotLoginTwice(t *testing.T, config Config) { - addr, client, stop := makeStartNode(t, config) +func testVisorCannotLoginTwice(t *testing.T, config Config) { + addr, client, stop := makeStartVisor(t, config) defer stop() testCases(t, addr, client, []TestCase{ @@ -239,8 +239,8 @@ func testNodeCannotLoginTwice(t *testing.T, config Config) { }) } -func testNodeAccessAfterLogin(t 
*testing.T, config Config) { - addr, client, stop := makeStartNode(t, config) +func testVisorAccessAfterLogin(t *testing.T, config Config) { + addr, client, stop := makeStartVisor(t, config) defer stop() testCases(t, addr, client, []TestCase{ @@ -273,14 +273,14 @@ func testNodeAccessAfterLogin(t *testing.T, config Config) { }, { ReqMethod: http.MethodGet, - ReqURI: "/api/nodes", + ReqURI: "/api/visors", RespStatus: http.StatusOK, }, }) } -func testNodeNoAccessAfterLogout(t *testing.T, config Config) { - addr, client, stop := makeStartNode(t, config) +func testVisorNoAccessAfterLogout(t *testing.T, config Config) { + addr, client, stop := makeStartVisor(t, config) defer stop() testCases(t, addr, client, []TestCase{ @@ -328,7 +328,7 @@ func testNodeNoAccessAfterLogout(t *testing.T, config Config) { }, { ReqMethod: http.MethodGet, - ReqURI: "/api/nodes", + ReqURI: "/api/visors", RespStatus: http.StatusUnauthorized, RespBody: func(t *testing.T, r *http.Response) { body, err := decodeErrorBody(r.Body) @@ -347,8 +347,8 @@ func testNodeNoAccessAfterLogout(t *testing.T, config Config) { // - Login with old password (should fail). // - Login with new password (should succeed). // nolint: funlen -func testNodeChangePassword(t *testing.T, config Config) { - addr, client, stop := makeStartNode(t, config) +func testVisorChangePassword(t *testing.T, config Config) { + addr, client, stop := makeStartVisor(t, config) defer stop() // To emulate an active session. @@ -391,7 +391,7 @@ func testNodeChangePassword(t *testing.T, config Config) { }, { ReqMethod: http.MethodGet, - ReqURI: "/api/nodes", + ReqURI: "/api/visors", ReqMod: func(req *http.Request) { for _, cookie := range cookies { req.AddCookie(cookie) diff --git a/pkg/router/route_group.go b/pkg/router/route_group.go index 31240c0a7..a3f91af1c 100644 --- a/pkg/router/route_group.go +++ b/pkg/router/route_group.go @@ -384,7 +384,7 @@ func (rg *RouteGroup) close(code routing.CloseCode) error { rg.broadcastClosePackets(code) if closeInitiator { - // if this node initiated closing, we need to wait for close packets + // if this visor initiated closing, we need to wait for close packets // to come back, or to exit with a timeout if anything goes wrong in // the network if err := rg.waitForCloseLoop(closeRoutineTimeout); err != nil { diff --git a/pkg/router/router.go b/pkg/router/router.go index ab0ba7c92..31bd549b1 100644 --- a/pkg/router/router.go +++ b/pkg/router/router.go @@ -108,7 +108,7 @@ type Router interface { SetupIsTrusted(cipher.PubKey) bool } -// Router implements node.PacketRouter. It manages routing table by +// Router implements visor.PacketRouter. It manages routing table by // communicating with setup nodes, forward packets according to local // rules and manages loops for apps. type router struct { @@ -295,7 +295,7 @@ func (r *router) serveSetup() { } if !r.SetupIsTrusted(conn.RemotePK()) { - r.logger.Warnf("closing conn from untrusted setup node: %v", conn.Close()) + r.logger.Warnf("closing conn from untrusted setup visor: %v", conn.Close()) continue } @@ -573,7 +573,7 @@ fetchRoutesAgain: return paths[forward][0], paths[backward][0], nil } -// SetupIsTrusted checks if setup node is trusted. +// SetupIsTrusted checks if setup visor is trusted. 
func (r *router) SetupIsTrusted(sPK cipher.PubKey) bool { _, ok := r.trustedNodes[sPK] return ok diff --git a/pkg/router/router_test.go b/pkg/router/router_test.go index 103decba9..47b518df1 100644 --- a/pkg/router/router_test.go +++ b/pkg/router/router_test.go @@ -70,7 +70,7 @@ func Test_router_DialRoutes(t *testing.T) { r0.conf.RouteGroupDialer = setupclient.NewMockDialer() r1.conf.RouteGroupDialer = setupclient.NewMockDialer() - // prepare loop creation (client_1 will use this to request loop creation with setup node). + // prepare loop creation (client_1 will use this to request loop creation with setup visor). desc := routing.NewRouteDescriptor(r0.conf.PubKey, r1.conf.PubKey, 1, 1) forwardHops := []routing.Hop{ diff --git a/pkg/routing/route.go b/pkg/routing/route.go index 52c05a613..2e7c0607d 100644 --- a/pkg/routing/route.go +++ b/pkg/routing/route.go @@ -11,7 +11,7 @@ import ( "github.com/google/uuid" ) -// Route is a succession of transport entries that denotes a path from source node to destination node +// Route is a succession of transport entries that denotes a path from source visor to destination visor type Route struct { Desc RouteDescriptor `json:"desc"` Path Path `json:"path"` diff --git a/pkg/routing/rule.go b/pkg/routing/rule.go index 31b703d16..8f0283a38 100644 --- a/pkg/routing/rule.go +++ b/pkg/routing/rule.go @@ -37,12 +37,12 @@ func (rt RuleType) String() string { } const ( - // RuleConsume represents a hop to the route's destination node. + // RuleConsume represents a hop to the route's destination visor. // A packet referencing this rule is to be consumed locally. RuleConsume = RuleType(0) - // RuleForward represents a hop from the route's source node. - // A packet referencing this rule is to be sent to a remote node. + // RuleForward represents a hop from the route's source visor. + // A packet referencing this rule is to be sent to a remote visor. RuleForward = RuleType(1) // RuleIntermediaryForward represents a hop which is not from the route's source, diff --git a/pkg/setup/config.go b/pkg/setup/config.go index 9446728e0..7afca1954 100644 --- a/pkg/setup/config.go +++ b/pkg/setup/config.go @@ -6,13 +6,13 @@ import ( "github.com/SkycoinProject/dmsg/cipher" ) -// Various timeouts for setup node. +// Various timeouts for setup visor. const ( RequestTimeout = time.Second * 60 ReadTimeout = time.Second * 30 ) -// Config defines configuration parameters for setup Node. +// Config defines configuration parameters for setup Visor. type Config struct { PubKey cipher.PubKey `json:"public_key"` SecKey cipher.SecKey `json:"secret_key"` diff --git a/pkg/setup/node.go b/pkg/setup/node.go index 84a14b356..f114456a9 100644 --- a/pkg/setup/node.go +++ b/pkg/setup/node.go @@ -17,7 +17,7 @@ import ( "github.com/SkycoinProject/skywire-mainnet/pkg/routing" ) -// Node performs routes setup operations over messaging channel. +// Node performs route setup operations over the messaging channel. type Node struct { logger *logging.Logger dmsgC *dmsg.Client @@ -78,7 +78,7 @@ func (sn *Node) Close() error { // Serve starts transport listening loop. 
func (sn *Node) Serve() error { - sn.logger.Info("Serving setup node") + sn.logger.Info("Serving setup visor") for { conn, err := sn.dmsgL.AcceptStream() @@ -147,7 +147,7 @@ func (sn *Node) handleDialRouteGroup(ctx context.Context, route routing.Bidirect return routing.EdgeRules{}, fmt.Errorf("failed to confirm loop with destination visor: %v", err) } - sn.logger.Infof("Returning route rules to initiating node: %v", initRouteRules) + sn.logger.Infof("Returning route rules to initiating visor: %v", initRouteRules) return initRouteRules, nil } @@ -160,7 +160,7 @@ func (sn *Node) addIntermediaryRules(ctx context.Context, intermediaryRules Rule for pk, rules := range intermediaryRules { pk, rules := pk, rules - sn.logger.WithField("remote", pk).Info("Adding rules to intermediary node") + sn.logger.WithField("remote", pk).Info("Adding rules to intermediary visor") wg.Add(1) diff --git a/pkg/setup/node_test.go b/pkg/setup/node_test.go index 050774654..50f140b97 100644 --- a/pkg/setup/node_test.go +++ b/pkg/setup/node_test.go @@ -56,7 +56,7 @@ type clientWithDMSGAddrAndListener struct { } func TestNode(t *testing.T) { - // We are generating five key pairs - one for the `Router` of setup node, + // We are generating five key pairs - one for the `Router` of setup visor, // the other ones - for the clients along the desired route. keys := snettest.GenKeyPairs(5) @@ -66,20 +66,20 @@ func TestNode(t *testing.T) { reservedIDs := []routing.RouteID{1, 2} - // TEST: Emulates the communication between 4 visor nodes and a setup node, - // where the first client node initiates a route to the last. + // TEST: Emulates the communication between 4 visors and a setup visor, + // where the first client visor initiates a route to the last. t.Run("DialRouteGroup", func(t *testing.T) { testDialRouteGroup(t, keys, nEnv, reservedIDs) }) } func testDialRouteGroup(t *testing.T, keys []snettest.KeyPair, nEnv *snettest.Env, reservedIDs []routing.RouteID) { - // client index 0 is for setup node. - // clients index 1 to 4 are for visor nodes. + // client index 0 is for setup visor. + // clients index 1 to 4 are for visors. clients, closeClients := prepClients(t, keys, nEnv, reservedIDs, 5) defer closeClients() - // prepare and serve setup node (using client 0). + // prepare and serve setup visor (using client 0). _, closeSetup := prepSetupNode(t, clients[0].Client, clients[0].Listener) defer closeSetup() @@ -120,7 +120,7 @@ func testDialRouteGroup(t *testing.T, keys []snettest.KeyPair, nEnv *snettest.En } func prepBidirectionalRoute(clients []clientWithDMSGAddrAndListener) routing.BidirectionalRoute { - // prepare loop creation (client_1 will use this to request loop creation with setup node). + // prepare loop creation (client_1 will use this to request loop creation with setup visor). desc := routing.NewRouteDescriptor(clients[1].Addr.PK, clients[4].Addr.PK, 1, 1) forwardHops := []routing.Hop{ @@ -178,7 +178,7 @@ func prepClients( for i := 0; i < n; i++ { var port uint16 - // setup node + // setup visor if i == 0 { port = skyenv.DmsgSetupPort } else { @@ -208,7 +208,7 @@ func prepClients( fmt.Printf("Client %d PK: %s\n", i, clients[i].Addr.PK) - // exclude setup node + // exclude setup visor if i == 0 { continue } @@ -227,7 +227,7 @@ func prepClients( func prepRouter(client *clientWithDMSGAddrAndListener, reservedIDs []routing.RouteID, last bool) *router.MockRouter { r := &router.MockRouter{} - // passing two rules to each node (forward and reverse routes). Simulate + // passing two rules to each visor (forward and reverse routes). 
Simulate // applying intermediary rules. r.On("SaveRoutingRules", mock.Anything, mock.Anything). Return(func(rules ...routing.Rule) error { @@ -238,7 +238,7 @@ func prepRouter(client *clientWithDMSGAddrAndListener, reservedIDs []routing.Rou // simulate reserving IDs. r.On("ReserveKeys", 2).Return(reservedIDs, testhelpers.NoErr) - // destination node. Simulate applying edge rules. + // destination visor. Simulate applying edge rules. if last { r.On("IntroduceRules", mock.Anything).Return(func(rules routing.EdgeRules) error { client.AppliedEdgeRules = rules diff --git a/pkg/setup/rpc_gateway.go b/pkg/setup/rpc_gateway.go index e5be746c0..15aeaf4f3 100644 --- a/pkg/setup/rpc_gateway.go +++ b/pkg/setup/rpc_gateway.go @@ -11,7 +11,7 @@ import ( "github.com/SkycoinProject/skywire-mainnet/pkg/routing" ) -// RPCGateway is a RPC interface for setup node. +// RPCGateway is an RPC interface for setup visor. type RPCGateway struct { logger *logging.Logger reqPK cipher.PubKey diff --git a/pkg/setup/setupclient/client.go b/pkg/setup/setupclient/client.go index c4d7c0f55..e2957c27f 100644 --- a/pkg/setup/setupclient/client.go +++ b/pkg/setup/setupclient/client.go @@ -14,7 +14,7 @@ import ( const rpcName = "RPCGateway" -// Client is an RPC client for setup node. +// Client is an RPC client for setup visor. type Client struct { log *logging.Logger n *snet.Network @@ -47,14 +47,14 @@ func (c *Client) dial(ctx context.Context) (*snet.Conn, error) { for _, sPK := range c.setupNodes { conn, err := c.n.Dial(ctx, snet.DmsgType, sPK, snet.SetupPort) if err != nil { - c.log.WithError(err).Warnf("failed to dial to setup node: setupPK(%s)", sPK) + c.log.WithError(err).Warnf("failed to dial to setup visor: setupPK(%s)", sPK) continue } return conn, nil } - return nil, errors.New("failed to dial to a setup node") + return nil, errors.New("failed to dial to a setup visor") } // Close closes a Client. diff --git a/pkg/snet/network.go b/pkg/snet/network.go index 03be248e8..e93fc2942 100644 --- a/pkg/snet/network.go +++ b/pkg/snet/network.go @@ -21,9 +21,9 @@ import ( // Default ports. // TODO(evanlinjin): Define these properly. These are currently random. const ( - SetupPort = uint16(36) // Listening port of a setup node. - AwaitSetupPort = uint16(136) // Listening port of a visor node for setup operations. - TransportPort = uint16(45) // Listening port of a visor node for incoming transports. + SetupPort = uint16(36) // Listening port of a setup visor. + AwaitSetupPort = uint16(136) // Listening port of a visor for setup operations. + TransportPort = uint16(45) // Listening port of a visor for incoming transports. ) // Network types. @@ -155,7 +155,7 @@ type Dialer interface { Type() string } -// Dial dials a node by its public key and returns a connection. +// Dial dials a visor by its public key and returns a connection. func (n *Network) Dial(ctx context.Context, network string, pk cipher.PubKey, port uint16) (*Conn, error) { switch network { case DmsgType: diff --git a/pkg/transport/handshake.go b/pkg/transport/handshake.go index 535eb743c..6ec868ccd 100644 --- a/pkg/transport/handshake.go +++ b/pkg/transport/handshake.go @@ -91,7 +91,7 @@ func MakeSettlementHS(init bool) SettlementHS { } }() - // create signed entry and send it to responding visor node. + // create signed entry and send it to responding visor. 
se, ok := NewSignedEntry(&entry, conn.LocalPK(), sk) if !ok { return errors.New("failed to sign entry") @@ -130,7 +130,7 @@ func MakeSettlementHS(init bool) SettlementHS { log.WithError(err).Error("Failed to register transports") } - // inform initiating visor node. + // inform initiating visor. if _, err := conn.Write([]byte{1}); err != nil { return fmt.Errorf("failed to accept transport settlement: write failed: %v", err) } diff --git a/pkg/transport/manager.go b/pkg/transport/manager.go index cb8d037a4..7321cce06 100644 --- a/pkg/transport/manager.go +++ b/pkg/transport/manager.go @@ -23,7 +23,7 @@ import ( type ManagerConfig struct { PubKey cipher.PubKey SecKey cipher.SecKey - DefaultNodes []cipher.PubKey // Nodes to automatically connect to + DefaultVisors []cipher.PubKey // Visors to automatically connect to DiscoveryClient DiscoveryClient LogStore LogStore } @@ -139,7 +139,7 @@ func (tm *Manager) initTransports(ctx context.Context) { entries, err := tm.Conf.DiscoveryClient.GetTransportsByEdge(ctx, tm.Conf.PubKey) if err != nil { - log.Warnf("No transports found for local node: %v", err) + log.Warnf("No transports found for local visor: %v", err) } for _, entry := range entries { var ( @@ -188,7 +188,7 @@ func (tm *Manager) acceptTransport(ctx context.Context, lis *snet.Listener) erro return nil } -// SaveTransport begins to attempt to establish data transports to the given 'remote' node. +// SaveTransport begins to attempt to establish data transports to the given 'remote' visor. func (tm *Manager) SaveTransport(ctx context.Context, remote cipher.PubKey, tpType string) (*ManagedTransport, error) { tm.mx.Lock() defer tm.mx.Unlock() diff --git a/pkg/util/pathutil/configpath.go b/pkg/util/pathutil/configpath.go index fab9a8951..117eaa31c 100644 --- a/pkg/util/pathutil/configpath.go +++ b/pkg/util/pathutil/configpath.go @@ -71,8 +71,8 @@ func (dp ConfigPaths) Get(cpType ConfigLocationType) string { return "" } -// NodeDefaults returns the default config paths for skywire-visor. -func NodeDefaults() ConfigPaths { +// VisorDefaults returns the default config paths for skywire-visor. +func VisorDefaults() ConfigPaths { paths := make(ConfigPaths) if wd, err := os.Getwd(); err == nil { paths[WorkingDirLoc] = filepath.Join(wd, "skywire-config.json") diff --git a/pkg/util/pathutil/homedir.go b/pkg/util/pathutil/homedir.go index 454afc0da..12b869c4e 100644 --- a/pkg/util/pathutil/homedir.go +++ b/pkg/util/pathutil/homedir.go @@ -23,8 +23,8 @@ func HomeDir() string { return os.Getenv("HOME") } -// NodeDir returns a path to a directory used to store specific node configuration. Such dir is ~/.skywire/{PK} -func NodeDir(pk cipher.PubKey) string { +// VisorDir returns a path to a directory used to store specific visor configuration. Such dir is ~/.skycoin/skywire/{PK} +func VisorDir(pk cipher.PubKey) string { return filepath.Join(HomeDir(), ".skycoin", "skywire", pk.String()) } diff --git a/pkg/visor/config.go b/pkg/visor/config.go index 9137eb1ab..1dee6166c 100644 --- a/pkg/visor/config.go +++ b/pkg/visor/config.go @@ -18,16 +18,16 @@ import ( trClient "github.com/SkycoinProject/skywire-mainnet/pkg/transport-discovery/client" ) -// Config defines configuration parameters for Node. +// Config defines configuration parameters for Visor. // TODO(evanlinjin): Instead of having nested structs, make separate types for each field. // TODO(evanlinjin): Use pointers to allow nil-configs for non-crucial fields. 
type Config struct { Version string `json:"version"` - Node struct { + Visor struct { StaticPubKey cipher.PubKey `json:"static_public_key"` StaticSecKey cipher.SecKey `json:"static_secret_key"` - } `json:"node"` + } `json:"visor"` STCP struct { PubKeyTable map[cipher.PubKey]string `json:"pk_table"` @@ -61,8 +61,8 @@ type Config struct { Apps []AppConfig `json:"apps"` - TrustedNodes []cipher.PubKey `json:"trusted_nodes"` - Hypervisors []HypervisorConfig `json:"hypervisors"` + TrustedVisors []cipher.PubKey `json:"trusted_nodes"` + Hypervisors []HypervisorConfig `json:"hypervisors"` AppsPath string `json:"apps_path"` LocalPath string `json:"local_path"` @@ -86,8 +86,8 @@ func (c *Config) DmsgConfig() (*DmsgConfig, error) { } return &DmsgConfig{ - PubKey: c.Node.StaticPubKey, - SecKey: c.Node.StaticSecKey, + PubKey: c.Visor.StaticPubKey, + SecKey: c.Visor.StaticSecKey, Discovery: disc.NewHTTP(dmsgConfig.Discovery), Retries: 5, RetryDelay: time.Second, @@ -103,8 +103,8 @@ func (c *Config) DmsgPtyHost(dmsgC *dmsg.Client) (*dmsgpty.Host, error) { return dmsgpty.NewHostFromDmsgClient( nil, dmsgC, - c.Node.StaticPubKey, - c.Node.StaticSecKey, + c.Visor.StaticPubKey, + c.Visor.StaticSecKey, c.DmsgPty.AuthFile, c.DmsgPty.Port, c.DmsgPty.CLINet, @@ -118,7 +118,7 @@ func (c *Config) TransportDiscovery() (transport.DiscoveryClient, error) { return nil, errors.New("empty transport_discovery") } - return trClient.NewHTTP(c.Transport.Discovery, c.Node.StaticPubKey, c.Node.StaticSecKey) + return trClient.NewHTTP(c.Transport.Discovery, c.Visor.StaticPubKey, c.Visor.StaticSecKey) } // TransportLogStore returns configure transport.LogStore. diff --git a/pkg/visor/config_test.go b/pkg/visor/config_test.go index ad1e5f8cd..acfa36341 100644 --- a/pkg/visor/config_test.go +++ b/pkg/visor/config_test.go @@ -21,8 +21,8 @@ import ( func TestDmsgDiscovery(t *testing.T) { pk, sk := cipher.GenerateKeyPair() conf := Config{} - conf.Node.StaticPubKey = pk - conf.Node.StaticSecKey = sk + conf.Visor.StaticPubKey = pk + conf.Visor.StaticSecKey = sk conf.Dmsg.Discovery = "skywire.skycoin.net:8001" conf.Dmsg.SessionsCount = 10 diff --git a/pkg/visor/rpc.go b/pkg/visor/rpc.go index 58154f647..21ceb683f 100644 --- a/pkg/visor/rpc.go +++ b/pkg/visor/rpc.go @@ -19,7 +19,7 @@ import ( const ( // RPCPrefix is the prefix used with all RPC calls. - RPCPrefix = "app-node" + RPCPrefix = "app-visor" ) var ( @@ -36,9 +36,9 @@ var ( ErrMalformedRestartContext = errors.New("restart context is malformed") ) -// RPC defines RPC methods for Node. +// RPC defines RPC methods for Visor. 
type RPC struct { - node *Node + visor *Visor } /* @@ -58,16 +58,16 @@ func (r *RPC) Health(_ *struct{}, out *HealthInfo) error { out.RouteFinder = http.StatusOK out.SetupNode = http.StatusOK - _, err := r.node.conf.TransportDiscovery() + _, err := r.visor.conf.TransportDiscovery() if err != nil { out.TransportDiscovery = http.StatusNotFound } - if r.node.conf.Routing.RouteFinder == "" { + if r.visor.conf.Routing.RouteFinder == "" { out.RouteFinder = http.StatusNotFound } - if len(r.node.conf.Routing.SetupNodes) == 0 { + if len(r.visor.conf.Routing.SetupNodes) == 0 { out.SetupNode = http.StatusNotFound } @@ -80,7 +80,7 @@ func (r *RPC) Health(_ *struct{}, out *HealthInfo) error { // Uptime returns for how long the visor has been running in seconds func (r *RPC) Uptime(_ *struct{}, out *float64) error { - *out = time.Since(r.node.startedAt).Seconds() + *out = time.Since(r.visor.startedAt).Seconds() return nil } @@ -98,7 +98,7 @@ type AppLogsRequest struct { -// LogsSince returns all logs from an specific app since the timestamp +// LogsSince returns all logs from a specific app since the timestamp func (r *RPC) LogsSince(in *AppLogsRequest, out *[]string) error { - ls, err := app.NewLogStore(filepath.Join(r.node.dir(), in.AppName), in.AppName, "bbolt") + ls, err := app.NewLogStore(filepath.Join(r.visor.dir(), in.AppName), in.AppName, "bbolt") if err != nil { return err } @@ -155,18 +155,18 @@ type Summary struct { -// Summary provides a summary of the AppNode. +// Summary provides a summary of the AppVisor. func (r *RPC) Summary(_ *struct{}, out *Summary) error { var summaries []*TransportSummary - r.node.tm.WalkTransports(func(tp *transport.ManagedTransport) bool { + r.visor.tm.WalkTransports(func(tp *transport.ManagedTransport) bool { summaries = append(summaries, - newTransportSummary(r.node.tm, tp, false, r.node.router.SetupIsTrusted(tp.Remote()))) + newTransportSummary(r.visor.tm, tp, false, r.visor.router.SetupIsTrusted(tp.Remote()))) return true }) *out = Summary{ - PubKey: r.node.conf.Node.StaticPubKey, + PubKey: r.visor.conf.Visor.StaticPubKey, BuildInfo: buildinfo.Get(), AppProtoVersion: supportedProtocolVersion, - Apps: r.node.Apps(), + Apps: r.visor.Apps(), Transports: summaries, - RoutesCount: r.node.rt.Count(), + RoutesCount: r.visor.rt.Count(), } return nil } @@ -174,7 +174,7 @@ func (r *RPC) Summary(_ *struct{}, out *Summary) error { // Exec executes a given command in cmd and writes its output to out. func (r *RPC) Exec(cmd *string, out *[]byte) error { var err error - *out, err = r.node.Exec(*cmd) + *out, err = r.visor.Exec(*cmd) return err } @@ -182,20 +182,20 @@ func (r *RPC) Exec(cmd *string, out *[]byte) error { <<< APP MANAGEMENT >>> */ -// Apps returns list of Apps registered on the Node. +// Apps returns list of Apps registered on the Visor. func (r *RPC) Apps(_ *struct{}, reply *[]*AppState) error { - *reply = r.node.Apps() + *reply = r.visor.Apps() return nil } -// StartApp start App with provided name. +// StartApp starts App with provided name. func (r *RPC) StartApp(name *string, _ *struct{}) error { - return r.node.StartApp(*name) + return r.visor.StartApp(*name) } // StopApp stops App with provided name. func (r *RPC) StopApp(name *string, _ *struct{}) error { - return r.node.StopApp(*name) + return r.visor.StopApp(*name) } // SetAutoStartIn is input for SetAutoStart. @@ -206,26 +206,26 @@ type SetAutoStartIn struct { // SetAutoStart sets auto-start settings for an app. func (r *RPC) SetAutoStart(in *SetAutoStartIn, _ *struct{}) error { - return r.node.setAutoStart(in.AppName, in.AutoStart) + return r.visor.setAutoStart(in.AppName, in.AutoStart) } // SetSocksPassword sets password for skysocks. 
func (r *RPC) SetSocksPassword(in *string, _ *struct{}) error { - return r.node.setSocksPassword(*in) + return r.visor.setSocksPassword(*in) } // SetSocksClientPK sets PK for skysocks-client. func (r *RPC) SetSocksClientPK(in *cipher.PubKey, _ *struct{}) error { - return r.node.setSocksClientPK(*in) + return r.visor.setSocksClientPK(*in) } /* <<< TRANSPORT MANAGEMENT >>> */ -// TransportTypes lists all transport types supported by the Node. +// TransportTypes lists all transport types supported by the Visor. func (r *RPC) TransportTypes(_ *struct{}, out *[]string) error { - *out = r.node.tm.Networks() + *out = r.visor.tm.Networks() return nil } @@ -236,7 +236,7 @@ type TransportsIn struct { ShowLogs bool } -// Transports lists Transports of the Node and provides a summary of each. +// Transports lists Transports of the Visor and provides a summary of each. func (r *RPC) Transports(in *TransportsIn, out *[]*TransportSummary) error { typeIncluded := func(tType string) bool { if in.FilterTypes != nil { @@ -260,9 +260,9 @@ func (r *RPC) Transports(in *TransportsIn, out *[]*TransportSummary) error { } return true } - r.node.tm.WalkTransports(func(tp *transport.ManagedTransport) bool { - if typeIncluded(tp.Type()) && pkIncluded(r.node.tm.Local(), tp.Remote()) { - *out = append(*out, newTransportSummary(r.node.tm, tp, in.ShowLogs, r.node.router.SetupIsTrusted(tp.Remote()))) + r.visor.tm.WalkTransports(func(tp *transport.ManagedTransport) bool { + if typeIncluded(tp.Type()) && pkIncluded(r.visor.tm.Local(), tp.Remote()) { + *out = append(*out, newTransportSummary(r.visor.tm, tp, in.ShowLogs, r.visor.router.SetupIsTrusted(tp.Remote()))) } return true }) @@ -271,11 +271,11 @@ func (r *RPC) Transports(in *TransportsIn, out *[]*TransportSummary) error { // Transport obtains a Transport Summary of Transport of given Transport ID. func (r *RPC) Transport(in *uuid.UUID, out *TransportSummary) error { - tp := r.node.tm.Transport(*in) + tp := r.visor.tm.Transport(*in) if tp == nil { return ErrNotFound } - *out = *newTransportSummary(r.node.tm, tp, true, r.node.router.SetupIsTrusted(tp.Remote())) + *out = *newTransportSummary(r.visor.tm, tp, true, r.visor.router.SetupIsTrusted(tp.Remote())) return nil } @@ -287,7 +287,7 @@ type AddTransportIn struct { Timeout time.Duration } -// AddTransport creates a transport for the node. +// AddTransport creates a transport for the visor. func (r *RPC) AddTransport(in *AddTransportIn, out *TransportSummary) error { ctx := context.Background() @@ -297,18 +297,18 @@ func (r *RPC) AddTransport(in *AddTransportIn, out *TransportSummary) error { defer cancel() } - tp, err := r.node.tm.SaveTransport(ctx, in.RemotePK, in.TpType) + tp, err := r.visor.tm.SaveTransport(ctx, in.RemotePK, in.TpType) if err != nil { return err } - *out = *newTransportSummary(r.node.tm, tp, false, r.node.router.SetupIsTrusted(tp.Remote())) + *out = *newTransportSummary(r.visor.tm, tp, false, r.visor.router.SetupIsTrusted(tp.Remote())) return nil } -// RemoveTransport removes a Transport from the node. +// RemoveTransport removes a Transport from the visor. func (r *RPC) RemoveTransport(tid *uuid.UUID, _ *struct{}) error { - r.node.tm.DeleteTransport(*tid) + r.visor.tm.DeleteTransport(*tid) return nil } @@ -318,7 +318,7 @@ func (r *RPC) RemoveTransport(tid *uuid.UUID, _ *struct{}) error { // DiscoverTransportsByPK obtains available transports via the transport discovery via given public key. 
func (r *RPC) DiscoverTransportsByPK(pk *cipher.PubKey, out *[]*transport.EntryWithStatus) error { - tpD, err := r.node.conf.TransportDiscovery() + tpD, err := r.visor.conf.TransportDiscovery() if err != nil { return err } @@ -334,7 +334,7 @@ func (r *RPC) DiscoverTransportsByPK(pk *cipher.PubKey, out *[]*transport.EntryW // DiscoverTransportByID obtains available transports via the transport discovery via a given transport ID. func (r *RPC) DiscoverTransportByID(id *uuid.UUID, out *transport.EntryWithStatus) error { - tpD, err := r.node.conf.TransportDiscovery() + tpD, err := r.visor.conf.TransportDiscovery() if err != nil { return err } @@ -354,25 +354,25 @@ func (r *RPC) DiscoverTransportByID(id *uuid.UUID, out *transport.EntryWithStatu // RoutingRules obtains all routing rules of the RoutingTable. func (r *RPC) RoutingRules(_ *struct{}, out *[]routing.Rule) error { - *out = r.node.rt.AllRules() + *out = r.visor.rt.AllRules() return nil } // RoutingRule obtains a routing rule of given RouteID. func (r *RPC) RoutingRule(key *routing.RouteID, rule *routing.Rule) error { var err error - *rule, err = r.node.rt.Rule(*key) + *rule, err = r.visor.rt.Rule(*key) return err } // SaveRoutingRule saves a routing rule. func (r *RPC) SaveRoutingRule(in *routing.Rule, _ *struct{}) error { - return r.node.rt.SaveRule(*in) + return r.visor.rt.SaveRule(*in) } // RemoveRoutingRule removes a RoutingRule based on given RouteID key. func (r *RPC) RemoveRoutingRule(key *routing.RouteID, _ *struct{}) error { - r.node.rt.DelRules([]routing.RouteID{*key}) + r.visor.rt.DelRules([]routing.RouteID{*key}) return nil } @@ -391,14 +391,14 @@ type LoopInfo struct { func (r *RPC) Loops(_ *struct{}, out *[]LoopInfo) error { var loops []LoopInfo - rules := r.node.rt.AllRules() + rules := r.visor.rt.AllRules() for _, rule := range rules { if rule.Type() != routing.RuleConsume { continue } fwdRID := rule.NextRouteID() - rule, err := r.node.rt.Rule(fwdRID) + rule, err := r.visor.rt.Rule(fwdRID) if err != nil { return err } @@ -430,9 +430,9 @@ func (r *RPC) Restart(_ *struct{}, _ *struct{}) (err error) { } }() - if r.node.restartCtx == nil { + if r.visor.restartCtx == nil { return ErrMalformedRestartContext } - return r.node.restartCtx.Start() + return r.visor.restartCtx.Start() } diff --git a/pkg/visor/rpc_test.go b/pkg/visor/rpc_test.go index ddb346cc3..9de042da4 100644 --- a/pkg/visor/rpc_test.go +++ b/pkg/visor/rpc_test.go @@ -29,14 +29,14 @@ func TestHealth(t *testing.T) { sPK, sSK := cipher.GenerateKeyPair() c := &Config{} - c.Node.StaticPubKey = sPK - c.Node.StaticSecKey = sSK + c.Visor.StaticPubKey = sPK + c.Visor.StaticSecKey = sSK c.Transport.Discovery = "foo" c.Routing.SetupNodes = []cipher.PubKey{sPK} c.Routing.RouteFinder = "foo" t.Run("Report all the services as available", func(t *testing.T) { - rpc := &RPC{&Node{conf: c}} + rpc := &RPC{&Visor{conf: c}} h := &HealthInfo{} err := rpc.Health(nil, h) require.NoError(t, err) @@ -47,7 +47,7 @@ func TestHealth(t *testing.T) { }) t.Run("Report as unavailable", func(t *testing.T) { - rpc := &RPC{&Node{conf: &Config{}}} + rpc := &RPC{&Visor{conf: &Config{}}} h := &HealthInfo{} err := rpc.Health(nil, h) require.NoError(t, err) @@ -58,7 +58,7 @@ func TestHealth(t *testing.T) { } func TestUptime(t *testing.T) { - rpc := &RPC{&Node{startedAt: time.Now()}} + rpc := &RPC{&Visor{startedAt: time.Now()}} time.Sleep(time.Second) var res float64 err := rpc.Uptime(nil, &res) @@ -90,12 +90,12 @@ func TestListApps(t *testing.T) { pm.On("Exists", apps["foo"].App).Return(false) 
pm.On("Exists", apps["bar"].App).Return(true) - n := Node{ + n := Visor{ appsConf: apps, procManager: pm, } - rpc := &RPC{node: &n} + rpc := &RPC{visor: &n} var reply []*AppState require.NoError(t, rpc.Apps(nil, &reply)) @@ -103,7 +103,7 @@ func TestListApps(t *testing.T) { app1, app2 := reply[0], reply[1] if app1.Name != "foo" { - // apps inside node are stored inside a map, so their order + // apps inside visor are stored inside a map, so their order // is not deterministic, we should be ready for this and // rearrange the outer array to check values correctly app1, app2 = reply[1], reply[0] @@ -146,9 +146,9 @@ func TestStartStopApp(t *testing.T) { app := apps["foo"].App nodeCfg := Config{} - nodeCfg.Node.StaticPubKey = pk + nodeCfg.Visor.StaticPubKey = pk - node := &Node{ + node := &Visor{ router: r, appsConf: apps, logger: logging.MustGetLogger("test"), @@ -164,7 +164,7 @@ func TestStartStopApp(t *testing.T) { Name: app, Version: apps["foo"].Version, SockFilePath: nodeCfg.AppServerSockFile, - VisorPK: nodeCfg.Node.StaticPubKey.Hex(), + VisorPK: nodeCfg.Visor.StaticPubKey.Hex(), WorkDir: filepath.Join("", app, fmt.Sprintf("v%s", apps["foo"].Version)), } appArgs1 := append([]string{filepath.Join(node.dir(), app)}, apps["foo"].Args...) @@ -178,7 +178,7 @@ func TestStartStopApp(t *testing.T) { node.procManager = pm - rpc := &RPC{node: node} + rpc := &RPC{visor: node} err := rpc.StartApp(&unknownApp, nil) require.Error(t, err) @@ -227,8 +227,8 @@ These tests have been commented out for the following reasons: // {App: "bar", Version: "2.0", AutoStart: false, Port: 20}, // } // conf := &Config{} -// conf.Node.StaticPubKey = pk1 -// node := &Node{ +// conf.Visor.StaticPubKey = pk1 +// visor := &Visor{ // config: conf, // router: r, // tm: tm1, @@ -238,17 +238,17 @@ These tests have been commented out for the following reasons: // startedApps: map[string]*appBind{}, // logger: logging.MustGetLogger("test"), // } -// pathutil.EnsureDir(node.dir()) +// pathutil.EnsureDir(visor.dir()) // defer func() { -// if err := os.RemoveAll(node.dir()); err != nil { +// if err := os.RemoveAll(visor.dir()); err != nil { // log.WithError(err).Warn(err) // } // }() // -// require.NoError(t, node.StartApp("foo")) +// require.NoError(t, visor.StartApp("foo")) // // time.Sleep(time.Second) -// gateway := &RPC{node: node} +// gateway := &RPC{visor: visor} // // sConn, cConn := net.Pipe() // defer func() { @@ -345,10 +345,10 @@ These tests have been commented out for the following reasons: // assert.Equal(t, ErrUnknownApp, err) // // require.NoError(t, gateway.SetAutoStart(&in2, &struct{}{})) -// assert.True(t, node.appsConf[0].AutoStart) +// assert.True(t, visor.appsConf[0].AutoStart) // // require.NoError(t, gateway.SetAutoStart(&in3, &struct{}{})) -// assert.False(t, node.appsConf[0].AutoStart) +// assert.False(t, visor.appsConf[0].AutoStart) // // // Test with RPC Client // @@ -357,10 +357,10 @@ These tests have been commented out for the following reasons: // assert.Equal(t, ErrUnknownApp.Error(), err.Error()) // // require.NoError(t, client.SetAutoStart(in2.AppName, in2.AutoStart)) -// assert.True(t, node.appsConf[0].AutoStart) +// assert.True(t, visor.appsConf[0].AutoStart) // // require.NoError(t, client.SetAutoStart(in3.AppName, in3.AutoStart)) -// assert.False(t, node.appsConf[0].AutoStart) +// assert.False(t, visor.appsConf[0].AutoStart) // }) // // t.Run("TransportTypes", func(t *testing.T) { @@ -378,7 +378,7 @@ These tests have been commented out for the following reasons: // // t.Run("Transport", func(t 
*testing.T) { // var ids []uuid.UUID -// node.tm.WalkTransports(func(tp *transport.ManagedTransport) bool { +// visor.tm.WalkTransports(func(tp *transport.ManagedTransport) bool { // ids = append(ids, tp.RuleEntry.ID) // return true // }) diff --git a/pkg/visor/visor.go b/pkg/visor/visor.go index 6385e82df..ba155c197 100644 --- a/pkg/visor/visor.go +++ b/pkg/visor/visor.go @@ -52,7 +52,7 @@ const ( var ( // ErrUnknownApp represents lookup error for App related calls. ErrUnknownApp = errors.New("unknown app") - // ErrNoConfigPath is returned on attempt to read/write config when node contains no config path. + // ErrNoConfigPath is returned on attempt to read/write config when visor contains no config path. ErrNoConfigPath = errors.New("no config path") ) @@ -68,9 +68,9 @@ type AppState struct { Status AppStatus `json:"status"` } -// Node provides messaging runtime for Apps by setting up all +// Visor provides messaging runtime for Apps by setting up all // necessary connections and performing messaging gateway functions. -type Node struct { +type Visor struct { conf *Config router router.Router n *snet.Network @@ -98,32 +98,32 @@ type Node struct { appRPCServer *appserver.Server } -// NewNode constructs new Node. -func NewNode(cfg *Config, logger *logging.MasterLogger, restartCtx *restart.Context, cfgPath *string) (*Node, error) { +// NewVisor constructs new Visor. +func NewVisor(cfg *Config, logger *logging.MasterLogger, restartCtx *restart.Context, cfgPath *string) (*Visor, error) { ctx := context.Background() - node := &Node{ + visor := &Visor{ conf: cfg, confPath: cfgPath, } - node.Logger = logger - node.logger = node.Logger.PackageLogger("skywire") + visor.Logger = logger + visor.logger = visor.Logger.PackageLogger("skywire") restartCheckDelay, err := time.ParseDuration(cfg.RestartCheckDelay) if err == nil { restartCtx.SetCheckDelay(restartCheckDelay) } - restartCtx.RegisterLogger(node.logger) + restartCtx.RegisterLogger(visor.logger) - node.restartCtx = restartCtx + visor.restartCtx = restartCtx - pk := cfg.Node.StaticPubKey - sk := cfg.Node.StaticSecKey + pk := cfg.Visor.StaticPubKey + sk := cfg.Visor.StaticSecKey fmt.Println("min sessions:", cfg.Dmsg.SessionsCount) - node.n = snet.New(snet.Config{ + visor.n = snet.New(snet.Config{ PubKey: pk, SecKey: sk, TpNetworks: []string{dmsg.Type, snet.STcpType}, // TODO: Have some way to configure this. 
@@ -132,16 +132,16 @@ func NewNode(cfg *Config, logger *logging.MasterLogger, restartCtx *restart.Cont STCPLocalAddr: cfg.STCP.LocalAddr, STCPTable: cfg.STCP.PubKeyTable, }) - if err := node.n.Init(ctx); err != nil { + if err := visor.n.Init(ctx); err != nil { return nil, fmt.Errorf("failed to init network: %v", err) } if cfg.DmsgPty != nil { - pty, err := cfg.DmsgPtyHost(node.n.Dmsg()) + pty, err := cfg.DmsgPtyHost(visor.n.Dmsg()) if err != nil { return nil, fmt.Errorf("failed to setup pty: %v", err) } - node.pty = pty + visor.pty = pty } logger.Info("'dmsgpty' is not configured, skipping...") @@ -157,53 +157,53 @@ func NewNode(cfg *Config, logger *logging.MasterLogger, restartCtx *restart.Cont tmConfig := &transport.ManagerConfig{ PubKey: pk, SecKey: sk, - DefaultNodes: cfg.TrustedNodes, + DefaultVisors: cfg.TrustedVisors, DiscoveryClient: trDiscovery, LogStore: logStore, } - node.tm, err = transport.NewManager(node.n, tmConfig) + visor.tm, err = transport.NewManager(visor.n, tmConfig) if err != nil { return nil, fmt.Errorf("transport manager: %s", err) } - node.rt, err = cfg.RoutingTable() + visor.rt, err = cfg.RoutingTable() if err != nil { return nil, fmt.Errorf("routing table: %s", err) } rConfig := &router.Config{ - Logger: node.Logger.PackageLogger("router"), + Logger: visor.Logger.PackageLogger("router"), PubKey: pk, SecKey: sk, - TransportManager: node.tm, - RoutingTable: node.rt, + TransportManager: visor.tm, + RoutingTable: visor.rt, RouteFinder: rfclient.NewHTTP(cfg.Routing.RouteFinder, time.Duration(cfg.Routing.RouteFinderTimeout)), SetupNodes: cfg.Routing.SetupNodes, } - r, err := router.New(node.n, rConfig) + r, err := router.New(visor.n, rConfig) if err != nil { return nil, fmt.Errorf("failed to setup router: %v", err) } - node.router = r + visor.router = r - node.appsConf, err = cfg.AppsConfig() + visor.appsConf, err = cfg.AppsConfig() if err != nil { return nil, fmt.Errorf("invalid AppsConfig: %s", err) } - node.appsPath, err = cfg.AppsDir() + visor.appsPath, err = cfg.AppsDir() if err != nil { return nil, fmt.Errorf("invalid AppsPath: %s", err) } - node.localPath, err = cfg.LocalDir() + visor.localPath, err = cfg.LocalDir() if err != nil { return nil, fmt.Errorf("invalid LocalPath: %s", err) } if lvl, err := logging.LevelFromString(cfg.LogLevel); err == nil { - node.Logger.SetLevel(lvl) + visor.Logger.SetLevel(lvl) } if cfg.Interfaces.RPCAddress != "" { @@ -211,10 +211,10 @@ func NewNode(cfg *Config, logger *logging.MasterLogger, restartCtx *restart.Cont if err != nil { return nil, fmt.Errorf("failed to setup RPC listener: %s", err) } - node.rpcListener = l + visor.rpcListener = l } - node.rpcDialers = make([]*RPCClientDialer, len(cfg.Hypervisors)) + visor.rpcDialers = make([]*RPCClientDialer, len(cfg.Hypervisors)) for i, entry := range cfg.Hypervisors { _, rpcPort, err := httputil.SplitRPCAddr(entry.Addr) @@ -222,25 +222,25 @@ func NewNode(cfg *Config, logger *logging.MasterLogger, restartCtx *restart.Cont return nil, fmt.Errorf("failed to parse rpc port from rpc address: %s", err) } - node.rpcDialers[i] = NewRPCClientDialer(node.n, entry.PubKey, rpcPort) + visor.rpcDialers[i] = NewRPCClientDialer(visor.n, entry.PubKey, rpcPort) } - node.appRPCServer = appserver.New(logging.MustGetLogger("app_rpc_server"), node.conf.AppServerSockFile) + visor.appRPCServer = appserver.New(logging.MustGetLogger("app_rpc_server"), visor.conf.AppServerSockFile) go func() { - if err := node.appRPCServer.ListenAndServe(); err != nil { - node.logger.WithError(err).Error("error serving RPC") + 
if err := visor.appRPCServer.ListenAndServe(); err != nil { + visor.logger.WithError(err).Error("error serving RPC") } }() - node.procManager = appserver.NewProcManager(logging.MustGetLogger("proc_manager"), node.appRPCServer) + visor.procManager = appserver.NewProcManager(logging.MustGetLogger("proc_manager"), visor.appRPCServer) - return node, err + return visor, err } // Start spawns auto-started Apps, starts router and RPC interfaces . -func (node *Node) Start() error { - skywireNetworker := appnet.NewSkywireNetworker(logging.MustGetLogger("skynet"), node.router) +func (visor *Visor) Start() error { + skywireNetworker := appnet.NewSkywireNetworker(logging.MustGetLogger("skynet"), visor.router) if err := appnet.AddNetworker(appnet.TypeSkynet, skywireNetworker); err != nil { return fmt.Errorf("failed to add skywire networker: %v", err) } @@ -248,63 +248,63 @@ func (node *Node) Start() error { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - node.startedAt = time.Now() + visor.startedAt = time.Now() // Start pty. - if node.pty != nil { - go node.pty.ServeRemoteRequests(ctx) - go node.pty.ServeCLIRequests(ctx) + if visor.pty != nil { + go visor.pty.ServeRemoteRequests(ctx) + go visor.pty.ServeCLIRequests(ctx) } - pathutil.EnsureDir(node.dir()) - node.closePreviousApps() + pathutil.EnsureDir(visor.dir()) + visor.closePreviousApps() - for _, ac := range node.appsConf { + for _, ac := range visor.appsConf { if !ac.AutoStart { continue } go func(a AppConfig) { - if err := node.SpawnApp(&a, nil); err != nil { - node.logger.Warnf("App %s stopped working: %v", a.App, err) + if err := visor.SpawnApp(&a, nil); err != nil { + visor.logger.Warnf("App %s stopped working: %v", a.App, err) } }(ac) } rpcSvr := rpc.NewServer() - if err := rpcSvr.RegisterName(RPCPrefix, &RPC{node: node}); err != nil { + if err := rpcSvr.RegisterName(RPCPrefix, &RPC{visor: visor}); err != nil { return fmt.Errorf("rpc server created failed: %s", err) } - if node.rpcListener != nil { - node.logger.Info("Starting RPC interface on ", node.rpcListener.Addr()) + if visor.rpcListener != nil { + visor.logger.Info("Starting RPC interface on ", visor.rpcListener.Addr()) - go rpcSvr.Accept(node.rpcListener) + go rpcSvr.Accept(visor.rpcListener) } - for _, dialer := range node.rpcDialers { + for _, dialer := range visor.rpcDialers { go func(dialer *RPCClientDialer) { if err := dialer.Run(rpcSvr, time.Second); err != nil { - node.logger.Errorf("Hypervisor Dmsg Dial exited with error: %v", err) + visor.logger.Errorf("Hypervisor Dmsg Dial exited with error: %v", err) } }(dialer) } - node.logger.Info("Starting packet router") + visor.logger.Info("Starting packet router") - if err := node.router.Serve(ctx); err != nil { - return fmt.Errorf("failed to start Node: %s", err) + if err := visor.router.Serve(ctx); err != nil { + return fmt.Errorf("failed to start Visor: %s", err) } return nil } -func (node *Node) dir() string { - return pathutil.NodeDir(node.conf.Node.StaticPubKey) +func (visor *Visor) dir() string { + return pathutil.VisorDir(visor.conf.Visor.StaticPubKey) } -func (node *Node) pidFile() *os.File { - f, err := os.OpenFile(filepath.Join(node.dir(), "apps-pid.txt"), os.O_RDWR|os.O_CREATE, 0600) +func (visor *Visor) pidFile() *os.File { + f, err := os.OpenFile(filepath.Join(visor.dir(), "apps-pid.txt"), os.O_RDWR|os.O_CREATE, 0600) if err != nil { panic(err) } @@ -312,13 +312,13 @@ func (node *Node) pidFile() *os.File { return f } -func (node *Node) closePreviousApps() { - node.logger.Info("killing previously 
ran apps if any...") +func (visor *Visor) closePreviousApps() { + visor.logger.Info("killing previously run apps if any...") - pids := node.pidFile() + pids := visor.pidFile() defer func() { if err := pids.Close(); err != nil { - node.logger.Warnf("error closing PID file: %s", err) + visor.logger.Warnf("error closing PID file: %s", err) } }() @@ -326,26 +326,26 @@ func (node *Node) closePreviousApps() { for scanner.Scan() { appInfo := strings.Split(scanner.Text(), " ") if len(appInfo) != 2 { - node.logger.Fatalf("error parsing %s. Err: %s", pids.Name(), errors.New("line should be: [app name] [pid]")) + visor.logger.Fatalf("error parsing %s. Err: %s", pids.Name(), errors.New("line should be: [app name] [pid]")) } pid, err := strconv.Atoi(appInfo[1]) if err != nil { - node.logger.Fatalf("error parsing %s. Err: %s", pids.Name(), err) + visor.logger.Fatalf("error parsing %s. Err: %s", pids.Name(), err) } - node.stopUnhandledApp(appInfo[0], pid) + visor.stopUnhandledApp(appInfo[0], pid) } // empty file pathutil.AtomicWriteFile(pids.Name(), []byte{}) } -func (node *Node) stopUnhandledApp(name string, pid int) { +func (visor *Visor) stopUnhandledApp(name string, pid int) { p, err := os.FindProcess(pid) if err != nil { if runtime.GOOS != "windows" { - node.logger.Infof("Previous app %s ran by this node with pid: %d not found", name, pid) + visor.logger.Infof("Previous app %s run by this visor with pid: %d not found", name, pid) } return } @@ -355,68 +355,68 @@ func (node *Node) stopUnhandledApp(name string, pid int) { return } - node.logger.Infof("Found and killed hanged app %s with pid %d previously ran by this node", name, pid) + visor.logger.Infof("Found and killed hung app %s with pid %d previously run by this visor", name, pid) } -// Close safely stops spawned Apps and messaging Node. -func (node *Node) Close() (err error) { - if node == nil { +// Close safely stops spawned Apps and messaging Visor. 
+func (visor *Visor) Close() (err error) { + if visor == nil { return nil } - if node.rpcListener != nil { - if err = node.rpcListener.Close(); err != nil { - node.logger.WithError(err).Error("failed to stop RPC interface") + if visor.rpcListener != nil { + if err = visor.rpcListener.Close(); err != nil { + visor.logger.WithError(err).Error("failed to stop RPC interface") } else { - node.logger.Info("RPC interface stopped successfully") + visor.logger.Info("RPC interface stopped successfully") } } - for i, dialer := range node.rpcDialers { + for i, dialer := range visor.rpcDialers { if err = dialer.Close(); err != nil { - node.logger.WithError(err).Errorf("(%d) failed to stop RPC dialer", i) + visor.logger.WithError(err).Errorf("(%d) failed to stop RPC dialer", i) } else { - node.logger.Infof("(%d) RPC dialer closed successfully", i) + visor.logger.Infof("(%d) RPC dialer closed successfully", i) } } - node.procManager.StopAll() + visor.procManager.StopAll() - if err = node.router.Close(); err != nil { - node.logger.WithError(err).Error("failed to stop router") + if err = visor.router.Close(); err != nil { + visor.logger.WithError(err).Error("failed to stop router") } else { - node.logger.Info("router stopped successfully") + visor.logger.Info("router stopped successfully") } - if err := node.appRPCServer.Close(); err != nil { - node.logger.WithError(err).Error("error closing RPC server") + if err := visor.appRPCServer.Close(); err != nil { + visor.logger.WithError(err).Error("error closing RPC server") } - if err := UnlinkSocketFiles(node.conf.AppServerSockFile); err != nil { - node.logger.WithError(err).Errorf("Failed to unlink socket file %s", node.conf.AppServerSockFile) + if err := UnlinkSocketFiles(visor.conf.AppServerSockFile); err != nil { + visor.logger.WithError(err).Errorf("Failed to unlink socket file %s", visor.conf.AppServerSockFile) } else { - node.logger.Infof("Socket file %s removed successfully", node.conf.AppServerSockFile) + visor.logger.Infof("Socket file %s removed successfully", visor.conf.AppServerSockFile) } return err } // Exec executes a shell command. It returns combined stdout and stderr output and an error. -func (node *Node) Exec(command string) ([]byte, error) { +func (visor *Visor) Exec(command string) ([]byte, error) { args := strings.Split(command, " ") cmd := exec.Command(args[0], args[1:]...) // nolint: gosec return cmd.CombinedOutput() } // Apps returns list of AppStates for all registered apps. -func (node *Node) Apps() []*AppState { +func (visor *Visor) Apps() []*AppState { // TODO: move app states to the app module res := make([]*AppState, 0) - for _, app := range node.appsConf { + for _, app := range visor.appsConf { state := &AppState{app.App, app.AutoStart, app.Port, AppStatusStopped} - if node.procManager.Exists(app.App) { + if visor.procManager.Exists(app.App) { state.Status = AppStatusRunning } @@ -427,14 +427,14 @@ func (node *Node) Apps() []*AppState { } // StartApp starts registered App. 
-func (node *Node) StartApp(appName string) error { - for _, app := range node.appsConf { +func (visor *Visor) StartApp(appName string) error { + for _, app := range visor.appsConf { if app.App == appName { startCh := make(chan struct{}) go func(app AppConfig) { - if err := node.SpawnApp(&app, startCh); err != nil { - node.logger.Warnf("App %s stopped working: %v", appName, err) + if err := visor.SpawnApp(&app, startCh); err != nil { + visor.logger.Warnf("App %s stopped working: %v", appName, err) } }(app) @@ -447,8 +447,8 @@ func (node *Node) StartApp(appName string) error { } // SpawnApp configures and starts new App. -func (node *Node) SpawnApp(config *AppConfig, startCh chan<- struct{}) (err error) { - node.logger.Infof("Starting %s.v%s", config.App, config.Version) +func (visor *Visor) SpawnApp(config *AppConfig, startCh chan<- struct{}) (err error) { + visor.logger.Infof("Starting %s.v%s", config.App, config.Version) if app, ok := reservedPorts[config.Port]; ok && app != config.App { return fmt.Errorf("can't bind to reserved port %d", config.Port) @@ -457,10 +457,10 @@ func (node *Node) SpawnApp(config *AppConfig, startCh chan<- struct{}) (err erro appCfg := appcommon.Config{ Name: config.App, Version: config.Version, - SockFilePath: node.conf.AppServerSockFile, - VisorPK: node.conf.Node.StaticPubKey.Hex(), - BinaryDir: node.appsPath, - WorkDir: filepath.Join(node.localPath, config.App, fmt.Sprintf("v%s", config.Version)), + SockFilePath: visor.conf.AppServerSockFile, + VisorPK: visor.conf.Visor.StaticPubKey.Hex(), + BinaryDir: visor.appsPath, + WorkDir: filepath.Join(visor.localPath, config.App, fmt.Sprintf("v%s", config.Version)), } if _, err := ensureDir(appCfg.WorkDir); err != nil { @@ -468,8 +468,8 @@ func (node *Node) SpawnApp(config *AppConfig, startCh chan<- struct{}) (err erro } // TODO: make PackageLogger return *RuleEntry. FieldLogger doesn't expose Writer. - logger := node.logger.WithField("_module", fmt.Sprintf("%s.v%s", config.App, config.Version)).Writer() - errLogger := node.logger.WithField("_module", fmt.Sprintf("%s.v%s[ERROR]", config.App, config.Version)).Writer() + logger := visor.logger.WithField("_module", fmt.Sprintf("%s.v%s", config.App, config.Version)).Writer() + errLogger := visor.logger.WithField("_module", fmt.Sprintf("%s.v%s[ERROR]", config.App, config.Version)).Writer() defer func() { if logErr := logger.Close(); err == nil && logErr != nil { @@ -482,9 +482,9 @@ func (node *Node) SpawnApp(config *AppConfig, startCh chan<- struct{}) (err erro }() appLogger := logging.MustGetLogger(fmt.Sprintf("app_%s", config.App)) - appArgs := append([]string{filepath.Join(node.dir(), config.App)}, config.Args...) + appArgs := append([]string{filepath.Join(visor.dir(), config.App)}, config.Args...) 
- pid, err := node.procManager.Start(appLogger, appCfg, appArgs, logger, errLogger) + pid, err := visor.procManager.Start(appLogger, appCfg, appArgs, logger, errLogger) if err != nil { return fmt.Errorf("error running app %s: %v", config.App, err) } @@ -493,34 +493,34 @@ func (node *Node) SpawnApp(config *AppConfig, startCh chan<- struct{}) (err erro startCh <- struct{}{} } - node.pidMu.Lock() - node.logger.Infof("storing app %s pid %d", config.App, pid) - node.persistPID(config.App, pid) - node.pidMu.Unlock() + visor.pidMu.Lock() + visor.logger.Infof("storing app %s pid %d", config.App, pid) + visor.persistPID(config.App, pid) + visor.pidMu.Unlock() - return node.procManager.Wait(config.App) + return visor.procManager.Wait(config.App) } -func (node *Node) persistPID(name string, pid appcommon.ProcID) { - pidF := node.pidFile() +func (visor *Visor) persistPID(name string, pid appcommon.ProcID) { + pidF := visor.pidFile() pidFName := pidF.Name() if err := pidF.Close(); err != nil { - node.logger.WithError(err).Warn("Failed to close PID file") + visor.logger.WithError(err).Warn("Failed to close PID file") } pathutil.AtomicAppendToFile(pidFName, []byte(fmt.Sprintf("%s %d\n", name, pid))) } // StopApp stops running App. -func (node *Node) StopApp(appName string) error { - node.logger.Infof("Stopping app %s and closing ports", appName) +func (visor *Visor) StopApp(appName string) error { + visor.logger.Infof("Stopping app %s and closing ports", appName) - if !node.procManager.Exists(appName) { + if !visor.procManager.Exists(appName) { return ErrUnknownApp } - if err := node.procManager.Stop(appName); err != nil { - node.logger.Warn("Failed to stop app: ", err) + if err := visor.procManager.Stop(appName); err != nil { + visor.logger.Warn("Failed to stop app: ", err) return err } @@ -528,43 +528,43 @@ func (node *Node) StopApp(appName string) error { } // RestartApp restarts running App. 
-func (node *Node) RestartApp(name string) error { - node.logger.Infof("Restarting app %v", name) +func (visor *Visor) RestartApp(name string) error { + visor.logger.Infof("Restarting app %v", name) - if err := node.StopApp(name); err != nil { + if err := visor.StopApp(name); err != nil { return fmt.Errorf("stop app %v: %w", name, err) } - if err := node.StartApp(name); err != nil { + if err := visor.StartApp(name); err != nil { return fmt.Errorf("start app %v: %w", name, err) } return nil } -func (node *Node) setAutoStart(appName string, autoStart bool) error { - appConf, ok := node.appsConf[appName] +func (visor *Visor) setAutoStart(appName string, autoStart bool) error { + appConf, ok := visor.appsConf[appName] if !ok { return ErrUnknownApp } appConf.AutoStart = autoStart - node.appsConf[appName] = appConf + visor.appsConf[appName] = appConf - return node.updateConfigAppAutoStart(appName, autoStart) + return visor.updateConfigAppAutoStart(appName, autoStart) } -func (node *Node) updateConfigAppAutoStart(appName string, autoStart bool) error { - if node.confPath == nil { +func (visor *Visor) updateConfigAppAutoStart(appName string, autoStart bool) error { + if visor.confPath == nil { return nil } - config, err := node.readConfig() + config, err := visor.readConfig() if err != nil { return err } - node.logger.Infof("Saving auto start = %v for app %v to config", autoStart, appName) + visor.logger.Infof("Saving auto start = %v for app %v to config", autoStart, appName) changed := false @@ -580,11 +580,11 @@ func (node *Node) updateConfigAppAutoStart(appName string, autoStart bool) error return nil } - return node.writeConfig(config) + return visor.writeConfig(config) } -func (node *Node) setSocksPassword(password string) error { - node.logger.Infof("Changing skysocks password to %q", password) +func (visor *Visor) setSocksPassword(password string) error { + visor.logger.Infof("Changing skysocks password to %q", password) const ( socksName = "skysocks" @@ -592,20 +592,20 @@ func (node *Node) setSocksPassword(password string) error { ) updateFunc := func(config *Config) { - node.updateArg(config, socksName, passcodeArgName, password) + visor.updateArg(config, socksName, passcodeArgName, password) } - if err := node.updateConfig(updateFunc); err != nil { + if err := visor.updateConfig(updateFunc); err != nil { return err } - node.logger.Infof("Updated %v password, restarting it", socksName) + visor.logger.Infof("Updated %v password, restarting it", socksName) - return node.RestartApp(socksName) + return visor.RestartApp(socksName) } -func (node *Node) setSocksClientPK(pk cipher.PubKey) error { - node.logger.Infof("Changing skysocks-client PK to %q", pk) +func (visor *Visor) setSocksClientPK(pk cipher.PubKey) error { + visor.logger.Infof("Changing skysocks-client PK to %q", pk) const ( socksClientName = "skysocks-client" @@ -613,19 +613,19 @@ func (node *Node) setSocksClientPK(pk cipher.PubKey) error { ) updateFunc := func(config *Config) { - node.updateArg(config, socksClientName, pkArgName, pk.String()) + visor.updateArg(config, socksClientName, pkArgName, pk.String()) } - if err := node.updateConfig(updateFunc); err != nil { + if err := visor.updateConfig(updateFunc); err != nil { return err } - node.logger.Infof("Updated %v PK, restarting it", socksClientName) + visor.logger.Infof("Updated %v PK, restarting it", socksClientName) - return node.RestartApp(socksClientName) + return visor.RestartApp(socksClientName) } -func (node *Node) updateArg(config *Config, appName, argName, value string) 
{ +func (visor *Visor) updateArg(config *Config, appName, argName, value string) { changed := false for i := range config.Apps { @@ -647,27 +647,27 @@ func (node *Node) updateArg(config *Config, appName, argName, value string) { } } -func (node *Node) updateConfig(f func(*Config)) error { - if node.confPath == nil { +func (visor *Visor) updateConfig(f func(*Config)) error { + if visor.confPath == nil { return nil } - config, err := node.readConfig() + config, err := visor.readConfig() if err != nil { return err } f(config) - return node.writeConfig(config) + return visor.writeConfig(config) } -func (node *Node) readConfig() (*Config, error) { - if node.confPath == nil { +func (visor *Visor) readConfig() (*Config, error) { + if visor.confPath == nil { return nil, ErrNoConfigPath } - configPath := *node.confPath + configPath := *visor.confPath bytes, err := ioutil.ReadFile(filepath.Clean(configPath)) if err != nil { @@ -682,14 +682,14 @@ func (node *Node) readConfig() (*Config, error) { return &config, nil } -func (node *Node) writeConfig(config *Config) error { - if node.confPath == nil { +func (visor *Visor) writeConfig(config *Config) error { + if visor.confPath == nil { return ErrNoConfigPath } - configPath := *node.confPath + configPath := *visor.confPath - node.logger.Infof("Updating visor config to %+v", config) + visor.logger.Infof("Updating visor config to %+v", config) bytes, err := json.MarshalIndent(config, "", "\t") if err != nil { diff --git a/pkg/visor/visor_test.go b/pkg/visor/visor_test.go index db11312d9..b7395489f 100644 --- a/pkg/visor/visor_test.go +++ b/pkg/visor/visor_test.go @@ -45,7 +45,7 @@ func TestMain(m *testing.M) { } // TODO(nkryuchkov): fix and uncomment -//func TestNewNode(t *testing.T) { +//func TestNewVisor(t *testing.T) { // pk, sk := cipher.GenerateKeyPair() // srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { // require.NoError(t, json.NewEncoder(w).Encode(&httpauth.NextNonceResponse{Edge: pk, NextNonce: 1})) @@ -53,8 +53,8 @@ func TestMain(m *testing.M) { // defer srv.Close() // // conf := Config{Version: "1.0", LocalPath: "local", AppsPath: "apps"} -// conf.Node.StaticPubKey = pk -// conf.Node.StaticSecKey = sk +// conf.Visor.StaticPubKey = pk +// conf.Visor.StaticSecKey = sk // conf.Dmsg.Discovery = "http://skywire.skycoin.com:8001" // conf.Dmsg.ServerCount = 10 // conf.Transport.Discovery = srv.URL @@ -67,17 +67,17 @@ func TestMain(m *testing.M) { // require.NoError(t, os.RemoveAll("local")) // }() // -// node, err := NewNode(&conf, masterLogger) +// visor, err := NewVisor(&conf, masterLogger) // require.NoError(t, err) // -// assert.NotNil(t, node.router) -// assert.NotNil(t, node.appsConf) -// assert.NotNil(t, node.appsPath) -// assert.NotNil(t, node.localPath) -// assert.NotNil(t, node.startedApps) +// assert.NotNil(t, visor.router) +// assert.NotNil(t, visor.appsConf) +// assert.NotNil(t, visor.appsPath) +// assert.NotNil(t, visor.localPath) +// assert.NotNil(t, visor.startedApps) //} -func TestNodeStartClose(t *testing.T) { +func TestVisorStartClose(t *testing.T) { r := &router.MockRouter{} r.On("Serve", mock.Anything /* context */).Return(testhelpers.NoErr) r.On("Close").Return(testhelpers.NoErr) @@ -106,13 +106,13 @@ func TestNodeStartClose(t *testing.T) { }() var ( - nodeCfg = Config{} + visorCfg = Config{} logger = logging.MustGetLogger("test") - server = appserver.New(logger, nodeCfg.AppServerSockFile) + server = appserver.New(logger, visorCfg.AppServerSockFile) ) - node := &Node{ - conf: &nodeCfg, + 
visor := &Visor{ + conf: &visorCfg, router: r, appsConf: apps, logger: logger, @@ -123,11 +123,11 @@ func TestNodeStartClose(t *testing.T) { appCfg1 := appcommon.Config{ Name: apps["skychat"].App, Version: apps["skychat"].Version, - SockFilePath: nodeCfg.AppServerSockFile, - VisorPK: nodeCfg.Node.StaticPubKey.Hex(), + SockFilePath: visorCfg.AppServerSockFile, + VisorPK: visorCfg.Visor.StaticPubKey.Hex(), WorkDir: filepath.Join("", apps["skychat"].App, fmt.Sprintf("v%s", apps["skychat"].Version)), } - appArgs1 := append([]string{filepath.Join(node.dir(), apps["skychat"].App)}, apps["skychat"].Args...) + appArgs1 := append([]string{filepath.Join(visor.dir(), apps["skychat"].App)}, apps["skychat"].Args...) appPID1 := appcommon.ProcID(10) pm.On("Start", mock.Anything, appCfg1, appArgs1, mock.Anything, mock.Anything). Return(appPID1, testhelpers.NoErr) @@ -135,7 +135,7 @@ func TestNodeStartClose(t *testing.T) { pm.On("StopAll").Return() - node.procManager = pm + visor.procManager = pm dmsgC := dmsg.NewClient(cipher.PubKey{}, cipher.SecKey{}, disc.NewMock(), nil) go dmsgC.Serve() @@ -155,20 +155,20 @@ func TestNodeStartClose(t *testing.T) { } tm, err := transport.NewManager(network, tmConf) - node.tm = tm + visor.tm = tm require.NoError(t, err) errCh := make(chan error) go func() { - errCh <- node.Start() + errCh <- visor.Start() }() require.NoError(t, <-errCh) time.Sleep(100 * time.Millisecond) - require.NoError(t, node.Close()) + require.NoError(t, visor.Close()) } -func TestNodeSpawnApp(t *testing.T) { +func TestVisorSpawnApp(t *testing.T) { pk, _ := cipher.GenerateKeyPair() r := &router.MockRouter{} r.On("Serve", mock.Anything /* context */).Return(testhelpers.NoErr) @@ -189,29 +189,29 @@ func TestNodeSpawnApp(t *testing.T) { apps := make(map[string]AppConfig) apps["skychat"] = app - nodeCfg := Config{} - nodeCfg.Node.StaticPubKey = pk + visorCfg := Config{} + visorCfg.Visor.StaticPubKey = pk - node := &Node{ + visor := &Visor{ router: r, appsConf: apps, logger: logging.MustGetLogger("test"), - conf: &nodeCfg, + conf: &visorCfg, } - pathutil.EnsureDir(node.dir()) + pathutil.EnsureDir(visor.dir()) defer func() { - require.NoError(t, os.RemoveAll(node.dir())) + require.NoError(t, os.RemoveAll(visor.dir())) }() pm := &appserver.MockProcManager{} appCfg := appcommon.Config{ Name: app.App, Version: app.Version, - SockFilePath: nodeCfg.AppServerSockFile, - VisorPK: nodeCfg.Node.StaticPubKey.Hex(), + SockFilePath: visorCfg.AppServerSockFile, + VisorPK: visorCfg.Visor.StaticPubKey.Hex(), WorkDir: filepath.Join("", app.App, fmt.Sprintf("v%s", app.Version)), } - appArgs := append([]string{filepath.Join(node.dir(), app.App)}, app.Args...) + appArgs := append([]string{filepath.Join(visor.dir(), app.App)}, app.Args...) 
pm.On("Wait", app.App).Return(testhelpers.NoErr) appPID := appcommon.ProcID(10) @@ -220,17 +220,17 @@ func TestNodeSpawnApp(t *testing.T) { pm.On("Exists", app.App).Return(true) pm.On("Stop", app.App).Return(testhelpers.NoErr) - node.procManager = pm + visor.procManager = pm - require.NoError(t, node.StartApp(app.App)) + require.NoError(t, visor.StartApp(app.App)) time.Sleep(100 * time.Millisecond) - require.True(t, node.procManager.Exists(app.App)) + require.True(t, visor.procManager.Exists(app.App)) - require.NoError(t, node.StopApp(app.App)) + require.NoError(t, visor.StopApp(app.App)) } -func TestNodeSpawnAppValidations(t *testing.T) { +func TestVisorSpawnAppValidations(t *testing.T) { pk, _ := cipher.GenerateKeyPair() r := &router.MockRouter{} r.On("Serve", mock.Anything /* context */).Return(testhelpers.NoErr) @@ -241,16 +241,16 @@ func TestNodeSpawnAppValidations(t *testing.T) { }() c := &Config{} - c.Node.StaticPubKey = pk + c.Visor.StaticPubKey = pk - node := &Node{ + visor := &Visor{ router: r, logger: logging.MustGetLogger("test"), conf: c, } - pathutil.EnsureDir(node.dir()) + pathutil.EnsureDir(visor.dir()) defer func() { - require.NoError(t, os.RemoveAll(node.dir())) + require.NoError(t, os.RemoveAll(visor.dir())) }() t.Run("fail - can't bind to reserved port", func(t *testing.T) { @@ -266,21 +266,21 @@ func TestNodeSpawnAppValidations(t *testing.T) { Name: app.App, Version: app.Version, SockFilePath: c.AppServerSockFile, - VisorPK: c.Node.StaticPubKey.Hex(), + VisorPK: c.Visor.StaticPubKey.Hex(), WorkDir: filepath.Join("", app.App, fmt.Sprintf("v%s", app.Version)), } - appArgs := append([]string{filepath.Join(node.dir(), app.App)}, app.Args...) + appArgs := append([]string{filepath.Join(visor.dir(), app.App)}, app.Args...) appPID := appcommon.ProcID(10) pm.On("Run", mock.Anything, appCfg, appArgs, mock.Anything, mock.Anything). Return(appPID, testhelpers.NoErr) pm.On("Exists", app.App).Return(false) - node.procManager = pm + visor.procManager = pm errCh := make(chan error) go func() { - errCh <- node.SpawnApp(&app, nil) + errCh <- visor.SpawnApp(&app, nil) }() time.Sleep(100 * time.Millisecond) @@ -302,21 +302,21 @@ func TestNodeSpawnAppValidations(t *testing.T) { Name: app.App, Version: app.Version, SockFilePath: c.AppServerSockFile, - VisorPK: c.Node.StaticPubKey.Hex(), + VisorPK: c.Visor.StaticPubKey.Hex(), WorkDir: filepath.Join("", app.App, fmt.Sprintf("v%s", app.Version)), } - appArgs := append([]string{filepath.Join(node.dir(), app.App)}, app.Args...) + appArgs := append([]string{filepath.Join(visor.dir(), app.App)}, app.Args...) appPID := appcommon.ProcID(10) pm.On("Start", mock.Anything, appCfg, appArgs, mock.Anything, mock.Anything). Return(appPID, appserver.ErrAppAlreadyStarted) pm.On("Exists", app.App).Return(true) - node.procManager = pm + visor.procManager = pm errCh := make(chan error) go func() { - errCh <- node.SpawnApp(&app, nil) + errCh <- visor.SpawnApp(&app, nil) }() time.Sleep(100 * time.Millisecond)