package main
import (
"errors"
_ "expvar"
"fmt"
"net"
"net/http"
_ "net/http/pprof"
"os"
"runtime"
"sort"
"sync"
"time"
multierror "github.com/hashicorp/go-multierror"
version "github.com/ipfs/kubo"
utilmain "github.com/ipfs/kubo/cmd/ipfs/util"
oldcmds "github.com/ipfs/kubo/commands"
config "github.com/ipfs/kubo/config"
cserial "github.com/ipfs/kubo/config/serialize"
"github.com/ipfs/kubo/core"
commands "github.com/ipfs/kubo/core/commands"
"github.com/ipfs/kubo/core/coreapi"
corehttp "github.com/ipfs/kubo/core/corehttp"
corerepo "github.com/ipfs/kubo/core/corerepo"
libp2p "github.com/ipfs/kubo/core/node/libp2p"
nodeMount "github.com/ipfs/kubo/fuse/node"
fsrepo "github.com/ipfs/kubo/repo/fsrepo"
"github.com/ipfs/kubo/repo/fsrepo/migrations"
"github.com/ipfs/kubo/repo/fsrepo/migrations/ipfsfetcher"
p2pcrypto "github.com/libp2p/go-libp2p/core/crypto"
pnet "github.com/libp2p/go-libp2p/core/pnet"
sockets "github.com/libp2p/go-socket-activation"
cmds "github.com/ipfs/go-ipfs-cmds"
mprome "github.com/ipfs/go-metrics-prometheus"
options "github.com/ipfs/interface-go-ipfs-core/options"
goprocess "github.com/jbenet/goprocess"
ma "github.com/multiformats/go-multiaddr"
manet "github.com/multiformats/go-multiaddr/net"
prometheus "github.com/prometheus/client_golang/prometheus"
promauto "github.com/prometheus/client_golang/prometheus/promauto"
)
const (
adjustFDLimitKwd = "manage-fdlimit"
enableGCKwd = "enable-gc"
initOptionKwd = "init"
initConfigOptionKwd = "init-config"
initProfileOptionKwd = "init-profile"
ipfsMountKwd = "mount-ipfs"
ipnsMountKwd = "mount-ipns"
migrateKwd = "migrate"
mountKwd = "mount"
offlineKwd = "offline" // global option
routingOptionKwd = "routing"
routingOptionSupernodeKwd = "supernode"
routingOptionDHTClientKwd = "dhtclient"
routingOptionDHTKwd = "dht"
routingOptionDHTServerKwd = "dhtserver"
routingOptionNoneKwd = "none"
routingOptionCustomKwd = "custom"
routingOptionDefaultKwd = "default"
routingOptionAutoKwd = "auto"
unencryptTransportKwd = "disable-transport-encryption"
unrestrictedAPIAccessKwd = "unrestricted-api"
writableKwd = "writable"
enablePubSubKwd = "enable-pubsub-experiment"
enableIPNSPubSubKwd = "enable-namesys-pubsub"
enableMultiplexKwd = "enable-mplex-experiment"
agentVersionSuffix = "agent-version-suffix"
// apiAddrKwd = "address-api"
// swarmAddrKwd = "address-swarm"
)
var daemonCmd = &cmds.Command{
Helptext: cmds.HelpText{
Tagline: "Run a network-connected IPFS node.",
ShortDescription: `
'ipfs daemon' runs a persistent ipfs daemon that can serve commands
over the network. Most applications that use IPFS will do so by
communicating with a daemon over the HTTP API. While the daemon is
running, calls to 'ipfs' commands will be sent over the network to
the daemon.
`,
LongDescription: `
The daemon will start listening on ports on the network, which are
documented in (and can be modified through) 'ipfs config Addresses'.
For example, to change the 'Gateway' port:
ipfs config Addresses.Gateway /ip4/127.0.0.1/tcp/8082
The RPC API address can be changed the same way:
ipfs config Addresses.API /ip4/127.0.0.1/tcp/5002
Make sure to restart the daemon after changing addresses.
By default, the gateway is only accessible locally. To expose it to
other computers in the network, use 0.0.0.0 as the ip address:
ipfs config Addresses.Gateway /ip4/0.0.0.0/tcp/8080
Be careful if you expose the RPC API. It is a security risk, as anyone could
control your node remotely. If you need to control the node remotely,
make sure to protect the port as you would other services or databases
(firewall, authenticated proxy, etc.).
HTTP Headers
ipfs supports passing arbitrary headers to the RPC API and Gateway. You can
do this by setting headers on the API.HTTPHeaders and Gateway.HTTPHeaders
keys:
ipfs config --json API.HTTPHeaders.X-Special-Header "[\"so special :)\"]"
ipfs config --json Gateway.HTTPHeaders.X-Special-Header "[\"so special :)\"]"
Note that the value of the keys is an _array_ of strings. This is because
headers can have more than one value, and it is convenient to pass through
to other libraries.
CORS Headers (for API)
You can set up CORS headers the same way:
ipfs config --json API.HTTPHeaders.Access-Control-Allow-Origin "[\"example.com\"]"
ipfs config --json API.HTTPHeaders.Access-Control-Allow-Methods "[\"PUT\", \"GET\", \"POST\"]"
ipfs config --json API.HTTPHeaders.Access-Control-Allow-Credentials "[\"true\"]"
Shutdown
To shut down the daemon, send a SIGINT signal to it (e.g. by pressing 'Ctrl-C')
or send a SIGTERM signal to it (e.g. with 'kill'). It may take a while for the
daemon to shut down gracefully, but it can be killed forcibly by sending a
second signal.
IPFS_PATH environment variable
ipfs uses a repository in the local file system. By default, the repo is
located at ~/.ipfs. To change the repo location, set the $IPFS_PATH
environment variable:
export IPFS_PATH=/path/to/ipfsrepo
DEPRECATION NOTICE
Previously, ipfs used an environment variable as seen below:
export API_ORIGIN="http://localhost:8888/"
This is deprecated. It is still honored in this version, but will be removed
in a future version, along with this notice. Please move to setting the HTTP
Headers.
`,
},
Options: []cmds.Option{
cmds.BoolOption(initOptionKwd, "Initialize ipfs with default settings if not already initialized"),
cmds.StringOption(initConfigOptionKwd, "Path to existing configuration file to be loaded during --init"),
cmds.StringOption(initProfileOptionKwd, "Configuration profiles to apply for --init. See ipfs init --help for more"),
cmds.StringOption(routingOptionKwd, "Overrides the routing option").WithDefault(routingOptionDefaultKwd),
cmds.BoolOption(mountKwd, "Mounts IPFS to the filesystem using FUSE (experimental)"),
cmds.BoolOption(writableKwd, "Enable legacy Gateway.Writable (deprecated)"),
cmds.StringOption(ipfsMountKwd, "Path to the mountpoint for IPFS (if using --mount). Defaults to config setting."),
cmds.StringOption(ipnsMountKwd, "Path to the mountpoint for IPNS (if using --mount). Defaults to config setting."),
cmds.BoolOption(unrestrictedAPIAccessKwd, "Allow API access to unlisted hashes"),
cmds.BoolOption(unencryptTransportKwd, "Disable transport encryption (for debugging protocols)"),
cmds.BoolOption(enableGCKwd, "Enable automatic periodic repo garbage collection"),
cmds.BoolOption(adjustFDLimitKwd, "Check and raise file descriptor limits if needed").WithDefault(true),
cmds.BoolOption(migrateKwd, "If true, assume yes at the migrate prompt. If false, assume no."),
cmds.BoolOption(enablePubSubKwd, "Enable experimental pubsub feature. Overrides Pubsub.Enabled config."),
cmds.BoolOption(enableIPNSPubSubKwd, "Enable IPNS over pubsub. Implicitly enables pubsub, overrides Ipns.UsePubsub config."),
cmds.BoolOption(enableMultiplexKwd, "DEPRECATED"),
cmds.StringOption(agentVersionSuffix, "Optional suffix to the AgentVersion presented by `ipfs id` and also advertised through BitSwap."),
// TODO: add way to override addresses. tricky part: updating the config if also --init.
// cmds.StringOption(apiAddrKwd, "Address for the daemon rpc API (overrides config)"),
// cmds.StringOption(swarmAddrKwd, "Address for the swarm socket (overrides config)"),
},
Subcommands: map[string]*cmds.Command{},
NoRemote: true,
Extra: commands.CreateCmdExtras(commands.SetDoesNotUseConfigAsInput(true)),
Run: daemonFunc,
}
// defaultMux tells mux to serve path using the default muxer. This is
// mostly useful to hook up things that register in the default muxer,
// and don't provide a convenient http.Handler entry point, such as
// expvar and http/pprof.
func defaultMux(path string) corehttp.ServeOption {
return func(node *core.IpfsNode, _ net.Listener, mux *http.ServeMux) (*http.ServeMux, error) {
mux.Handle(path, http.DefaultServeMux)
return mux, nil
}
}
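// daemonFunc is the Run handler for the daemon command. It initializes the
// repo if requested, runs any required fs-repo migrations, constructs the
// IPFS node, and starts the HTTP RPC API, the gateway, optional FUSE mounts,
// and optional periodic GC before blocking until shutdown.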
func daemonFunc(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) (_err error) {
// Inject metrics before we do anything
err := mprome.Inject()
if err != nil {
log.Errorf("Injecting prometheus handler for metrics failed with message: %s\n", err.Error())
}
// let the user know we're going.
fmt.Printf("Initializing daemon...\n")
defer func() {
if _err != nil {
// Print an extra line before any errors. This could go
// in the commands lib but doesn't really make sense for
// all commands.
fmt.Println()
}
}()
// print the ipfs version
printVersion()
managefd, _ := req.Options[adjustFDLimitKwd].(bool)
if managefd {
if _, _, err := utilmain.ManageFdLimit(); err != nil {
log.Errorf("setting file descriptor limit: %s", err)
}
}
cctx := env.(*oldcmds.Context)
// check transport encryption flag.
unencrypted, _ := req.Options[unencryptTransportKwd].(bool)
if unencrypted {
log.Warnf(`Running with --%s: All connections are UNENCRYPTED.
You will not be able to connect to regular encrypted networks.`, unencryptTransportKwd)
}
// first, check whether the user has provided the initialization flag. we may
// be running in an uninitialized state.
initialize, _ := req.Options[initOptionKwd].(bool)
if initialize && !fsrepo.IsInitialized(cctx.ConfigRoot) {
cfgLocation, _ := req.Options[initConfigOptionKwd].(string)
profiles, _ := req.Options[initProfileOptionKwd].(string)
var conf *config.Config
if cfgLocation != "" {
if conf, err = cserial.Load(cfgLocation); err != nil {
return err
}
}
if conf == nil {
identity, err := config.CreateIdentity(os.Stdout, []options.KeyGenerateOption{
options.Key.Type(algorithmDefault),
})
if err != nil {
return err
}
conf, err = config.InitWithIdentity(identity)
if err != nil {
return err
}
}
if err = doInit(os.Stdout, cctx.ConfigRoot, false, profiles, conf); err != nil {
return err
}
}
var cacheMigrations, pinMigrations bool
var fetcher migrations.Fetcher
// acquire the repo lock _before_ constructing a node. we need to make
// sure we are permitted to access the resources (datastore, etc.)
repo, err := fsrepo.Open(cctx.ConfigRoot)
switch err {
default:
return err
case fsrepo.ErrNeedMigration:
domigrate, found := req.Options[migrateKwd].(bool)
fmt.Println("Found outdated fs-repo, migrations need to be run.")
if !found {
domigrate = YesNoPrompt("Run migrations now? [y/N]")
}
if !domigrate {
fmt.Println("Not running migrations of fs-repo now.")
fmt.Println("Please get fs-repo-migrations from https://dist.ipfs.tech")
return fmt.Errorf("fs-repo requires migration")
}
// Read Migration section of IPFS config
configFileOpt, _ := req.Options[commands.ConfigFileOption].(string)
migrationCfg, err := migrations.ReadMigrationConfig(cctx.ConfigRoot, configFileOpt)
if err != nil {
return err
}
// Define function to create IPFS fetcher. Do not supply an
// already-constructed IPFS fetcher, because this may be expensive and
// not needed according to migration config. Instead, supply a function
// to construct the particular IPFS fetcher implementation used here,
// which is called only if an IPFS fetcher is needed.
newIpfsFetcher := func(distPath string) migrations.Fetcher {
return ipfsfetcher.NewIpfsFetcher(distPath, 0, &cctx.ConfigRoot, configFileOpt)
}
// Fetch migrations from current distribution, or location from environ
fetchDistPath := migrations.GetDistPathEnv(migrations.CurrentIpfsDist)
// Create fetchers according to migrationCfg.DownloadSources
fetcher, err = migrations.GetMigrationFetcher(migrationCfg.DownloadSources, fetchDistPath, newIpfsFetcher)
if err != nil {
return err
}
defer fetcher.Close()
if migrationCfg.Keep == "cache" {
cacheMigrations = true
} else if migrationCfg.Keep == "pin" {
pinMigrations = true
}
if cacheMigrations || pinMigrations {
// Create temp directory to store downloaded migration archives
migrations.DownloadDirectory, err = os.MkdirTemp("", "migrations")
if err != nil {
return err
}
// Defer cleanup of download directory so that it gets cleaned up
// if daemon returns early due to error
defer func() {
if migrations.DownloadDirectory != "" {
os.RemoveAll(migrations.DownloadDirectory)
}
}()
}
err = migrations.RunMigration(cctx.Context(), fetcher, fsrepo.RepoVersion, "", false)
if err != nil {
fmt.Println("The migrations of fs-repo failed:")
fmt.Printf(" %s\n", err)
fmt.Println("If you think this is a bug, please file an issue and include this whole log output.")
fmt.Println(" https://github.com/ipfs/fs-repo-migrations")
return err
}
repo, err = fsrepo.Open(cctx.ConfigRoot)
if err != nil {
return err
}
case nil:
break
}
// The node will also close the repo but there are many places we could
// fail before we get to that. It can't hurt to close it twice.
defer repo.Close()
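// Read the experimental pubsub flags; when a flag is not set, the value from
// the repo config (Pubsub.Enabled / Ipns.UsePubsub) is used below.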
offline, _ := req.Options[offlineKwd].(bool)
ipnsps, ipnsPsSet := req.Options[enableIPNSPubSubKwd].(bool)
pubsub, psSet := req.Options[enablePubSubKwd].(bool)
if _, hasMplex := req.Options[enableMultiplexKwd]; hasMplex {
log.Errorf("The mplex multiplexer has been enabled by default and the experimental %s flag has been removed.", enableMultiplexKwd)
log.Errorf("To disable this multiplexer, please configure `Swarm.Transports.Multiplexers'.")
}
cfg, err := repo.Config()
if err != nil {
return err
}
if !psSet {
pubsub = cfg.Pubsub.Enabled.WithDefault(false)
}
if !ipnsPsSet {
ipnsps = cfg.Ipns.UsePubsub.WithDefault(false)
}
// Start assembling node config
ncfg := &core.BuildCfg{
Repo: repo,
Permanent: true, // It is a temporary way to signify that the node is permanent
Online: !offline,
DisableEncryptedConnections: unencrypted,
ExtraOpts: map[string]bool{
"pubsub": pubsub,
"ipnsps": ipnsps,
},
//TODO(Kubuxu): refactor Online vs Offline by adding Permanent vs Ephemeral
}
routingOption, _ := req.Options[routingOptionKwd].(string)
if routingOption == routingOptionDefaultKwd {
routingOption = cfg.Routing.Type.WithDefault(routingOptionAutoKwd)
if routingOption == "" {
routingOption = routingOptionAutoKwd
}
}
// Private setups can't leverage peers returned by default IPNIs (Routing.Type=auto)
// To avoid breaking existing setups, switch them to DHT-only.
if routingOption == routingOptionAutoKwd {
if key, _ := repo.SwarmKey(); key != nil || pnet.ForcePrivateNetwork {
log.Error("Private networking (swarm.key / LIBP2P_FORCE_PNET) does not work with public HTTP IPNIs enabled by Routing.Type=auto. Kubo will use Routing.Type=dht instead. Update config to remove this message.")
routingOption = routingOptionDHTKwd
}
}
switch routingOption {
case routingOptionSupernodeKwd:
return errors.New("supernode routing was never fully implemented and has been removed")
case routingOptionDefaultKwd, routingOptionAutoKwd:
ncfg.Routing = libp2p.ConstructDefaultRouting(
cfg.Identity.PeerID,
cfg.Addresses.Swarm,
cfg.Identity.PrivKey,
)
case routingOptionDHTClientKwd:
ncfg.Routing = libp2p.DHTClientOption
case routingOptionDHTKwd:
ncfg.Routing = libp2p.DHTOption
case routingOptionDHTServerKwd:
ncfg.Routing = libp2p.DHTServerOption
case routingOptionNoneKwd:
ncfg.Routing = libp2p.NilRouterOption
case routingOptionCustomKwd:
ncfg.Routing = libp2p.ConstructDelegatedRouting(
cfg.Routing.Routers,
cfg.Routing.Methods,
cfg.Identity.PeerID,
cfg.Addresses.Swarm,
cfg.Identity.PrivKey,
)
default:
return fmt.Errorf("unrecognized routing option: %s", routingOption)
}
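// Apply an optional suffix to the agent version advertised to other peers.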
agentVersionSuffixString, _ := req.Options[agentVersionSuffix].(string)
if agentVersionSuffixString != "" {
version.SetUserAgentSuffix(agentVersionSuffixString)
}
node, err := core.NewNode(req.Context, ncfg)
if err != nil {
return err
}
node.IsDaemon = true
if node.PNetFingerprint != nil {
fmt.Println("Swarm is limited to private network of peers with the swarm key")
fmt.Printf("Swarm key fingerprint: %x\n", node.PNetFingerprint)
}
if (pnet.ForcePrivateNetwork || node.PNetFingerprint != nil) && routingOption == routingOptionAutoKwd {
// This should never happen, but better safe than sorry
log.Fatal("Private network does not work with Routing.Type=auto. Update your config to Routing.Type=dht (or none, and do manual peering)")
}
printSwarmAddrs(node)
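// Warn when the node still uses a legacy RSA Peer ID.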
if node.PrivateKey.Type() == p2pcrypto.RSA {
fmt.Print(`
Warning: You are using an RSA Peer ID, which was replaced by Ed25519
as the default recommended in Kubo since September 2020. Signing with
RSA Peer IDs is more CPU-intensive than with other key types.
It is recommended that you change your public key type to ed25519
by using the following command:
ipfs key rotate -o rsa-key-backup -t ed25519
After changing your key type, restart your node for the changes to
take effect.
`)
}
defer func() {
// We wait for the node to close first, as the node has children
// that it will wait for before closing, such as the API server.
node.Close()
select {
case <-req.Context.Done():
log.Info("Gracefully shut down daemon")
default:
}
}()
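// Expose the constructed node via the command context so that serveHTTPApi,
// serveHTTPGateway and mountFuse reuse the same instance.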
cctx.ConstructNode = func() (*core.IpfsNode, error) {
return node, nil
}
// Start "core" plugins. We want to do this *before* starting the HTTP
// API as the user may be relying on these plugins.
err = cctx.Plugins.Start(node)
if err != nil {
return err
}
node.Process.AddChild(goprocess.WithTeardown(cctx.Plugins.Close))
// construct api endpoint - every time
apiErrc, err := serveHTTPApi(req, cctx)
if err != nil {
return err
}
// construct fuse mountpoints - if the user provided the --mount flag
mount, _ := req.Options[mountKwd].(bool)
if mount && offline {
return cmds.Errorf(cmds.ErrClient, "mount is not currently supported in offline mode")
}
if mount {
if err := mountFuse(req, cctx); err != nil {
return err
}
}
// repo blockstore GC - if --enable-gc flag is present
gcErrc, err := maybeRunGC(req, node)
if err != nil {
return err
}
// Add any files downloaded by migration.
if cacheMigrations || pinMigrations {
err = addMigrations(cctx.Context(), node, fetcher, pinMigrations)
if err != nil {
fmt.Fprintln(os.Stderr, "Could not add migration to IPFS:", err)
}
// Remove the download directory so that it does not remain for the lifetime
// of the daemon or get left behind if the daemon has a hard exit
os.RemoveAll(migrations.DownloadDirectory)
migrations.DownloadDirectory = ""
}
if fetcher != nil {
// If there is an error closing the IpfsFetcher, then print error, but
// do not fail because of it.
err = fetcher.Close()
if err != nil {
log.Errorf("error closing IPFS fetcher: %s", err)
}
}
// construct http gateway
gwErrc, err := serveHTTPGateway(req, cctx)
if err != nil {
return err
}
// Add ipfs version info to prometheus metrics
var ipfsInfoMetric = promauto.NewGaugeVec(prometheus.GaugeOpts{
Name: "ipfs_info",
Help: "IPFS version information.",
}, []string{"version", "commit"})
// Setting to 1 lets us multiply it with other stats to add the version labels
ipfsInfoMetric.With(prometheus.Labels{
"version": version.CurrentVersionNumber,
"commit": version.CurrentCommit,
}).Set(1)
// TODO(9285): make metrics more configurable
// initialize metrics collector
prometheus.MustRegister(&corehttp.IpfsNodeCollector{Node: node})
// start MFS pinning thread
startPinMFS(daemonConfigPollInterval, cctx, &ipfsPinMFSNode{node})
// The daemon is *finally* ready.
fmt.Printf("Daemon is ready\n")
notifyReady()
// Give the user some immediate feedback when they hit C-c
go func() {
<-req.Context.Done()
notifyStopping()
fmt.Println("Received interrupt signal, shutting down...")
fmt.Println("(Hit ctrl-c again to force-shutdown the daemon.)")
}()
// Give the user a heads-up if a daemon running in online mode has no peers after 1 minute
if !offline {
time.AfterFunc(1*time.Minute, func() {
cfg, err := cctx.GetConfig()
if err != nil {
log.Errorf("failed to access config: %s", err)
}
if len(cfg.Bootstrap) == 0 && len(cfg.Peering.Peers) == 0 {
// Skip peer check if Bootstrap and Peering lists are empty
// (means user disabled them on purpose)
log.Warn("skipping bootstrap: empty Bootstrap and Peering lists")
return
}
ipfs, err := coreapi.NewCoreAPI(node)
if err != nil {
log.Errorf("failed to access CoreAPI: %v", err)
}
peers, err := ipfs.Swarm().Peers(cctx.Context())
if err != nil {
log.Errorf("failed to read swarm peers: %v", err)
}
if len(peers) == 0 {
log.Error("failed to bootstrap (no peers found): consider updating Bootstrap or Peering section of your config")
}
})
}
// Hard deprecation notice if someone still uses IPFS_REUSEPORT
if flag := os.Getenv("IPFS_REUSEPORT"); flag != "" {
log.Fatal("Support for IPFS_REUSEPORT was removed. Use LIBP2P_TCP_REUSEPORT instead.")
}
// collect long-running errors and block for shutdown
// TODO(cryptix): our fuse currently doesn't follow this pattern for graceful shutdown
var errs error
for err := range merge(apiErrc, gwErrc, gcErrc) {
if err != nil {
errs = multierror.Append(errs, err)
}
}
return errs
}
// serveHTTPApi collects options, creates listener, prints status message and starts serving requests
func serveHTTPApi(req *cmds.Request, cctx *oldcmds.Context) (<-chan error, error) {
cfg, err := cctx.GetConfig()
if err != nil {
return nil, fmt.Errorf("serveHTTPApi: GetConfig() failed: %s", err)
}
listeners, err := sockets.TakeListeners("io.ipfs.api")
if err != nil {
return nil, fmt.Errorf("serveHTTPApi: socket activation failed: %s", err)
}
apiAddrs := make([]string, 0, 2)
apiAddr, _ := req.Options[commands.ApiOption].(string)
if apiAddr == "" {
apiAddrs = cfg.Addresses.API
} else {
apiAddrs = append(apiAddrs, apiAddr)
}
listenerAddrs := make(map[string]bool, len(listeners))
for _, listener := range listeners {
listenerAddrs[string(listener.Multiaddr().Bytes())] = true
}
for _, addr := range apiAddrs {
apiMaddr, err := ma.NewMultiaddr(addr)
if err != nil {
return nil, fmt.Errorf("serveHTTPApi: invalid API address: %q (err: %s)", addr, err)
}
if listenerAddrs[string(apiMaddr.Bytes())] {
continue
}
apiLis, err := manet.Listen(apiMaddr)
if err != nil {
return nil, fmt.Errorf("serveHTTPApi: manet.Listen(%s) failed: %s", apiMaddr, err)
}
listenerAddrs[string(apiMaddr.Bytes())] = true
listeners = append(listeners, apiLis)
}
for _, listener := range listeners {
// we might have listened to /tcp/0 - let's see what we are listening on
fmt.Printf("API server listening on %s\n", listener.Multiaddr())
// Browsers require TCP.
switch listener.Addr().Network() {
case "tcp", "tcp4", "tcp6":
fmt.Printf("WebUI: http://%s/webui\n", listener.Addr())
}
}
// by default, we don't let you load arbitrary ipfs objects through the api,
// because this would open up the api to scripting vulnerabilities.
// only the webui objects are allowed.
// if you know what you're doing, go ahead and pass --unrestricted-api.
unrestricted, _ := req.Options[unrestrictedAPIAccessKwd].(bool)
gatewayOpt := corehttp.GatewayOption(false, corehttp.WebUIPaths...)
if unrestricted {
gatewayOpt = corehttp.GatewayOption(true, "/ipfs", "/ipns")
}
var opts = []corehttp.ServeOption{
corehttp.MetricsCollectionOption("api"),
corehttp.MetricsOpenCensusCollectionOption(),
corehttp.MetricsOpenCensusDefaultPrometheusRegistry(),
corehttp.CheckVersionOption(),
corehttp.CommandsOption(*cctx),
corehttp.WebUIOption,
gatewayOpt,
corehttp.VersionOption(),
defaultMux("/debug/vars"),
defaultMux("/debug/pprof/"),
defaultMux("/debug/stack"),
corehttp.MutexFractionOption("/debug/pprof-mutex/"),
corehttp.BlockProfileRateOption("/debug/pprof-block/"),
corehttp.MetricsScrapingOption("/debug/metrics/prometheus"),
corehttp.LogOption(),
}
if len(cfg.Gateway.RootRedirect) > 0 {
opts = append(opts, corehttp.RedirectOption("", cfg.Gateway.RootRedirect))
}
node, err := cctx.ConstructNode()
if err != nil {
return nil, fmt.Errorf("serveHTTPApi: ConstructNode() failed: %s", err)
}
if err := node.Repo.SetAPIAddr(rewriteMaddrToUseLocalhostIfItsAny(listeners[0].Multiaddr())); err != nil {
return nil, fmt.Errorf("serveHTTPApi: SetAPIAddr() failed: %w", err)
}
errc := make(chan error)
var wg sync.WaitGroup
for _, apiLis := range listeners {
wg.Add(1)
go func(lis manet.Listener) {
defer wg.Done()
errc <- corehttp.Serve(node, manet.NetListener(lis), opts...)
}(apiLis)
}
go func() {
wg.Wait()
close(errc)
}()
return errc, nil
}
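// rewriteMaddrToUseLocalhostIfItsAny replaces an unspecified IP component
// (0.0.0.0 or ::) with the matching loopback address so the address recorded
// via SetAPIAddr/SetGatewayAddr is locally dialable.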
func rewriteMaddrToUseLocalhostIfItsAny(maddr ma.Multiaddr) ma.Multiaddr {
first, rest := ma.SplitFirst(maddr)
switch {
case first.Equal(manet.IP4Unspecified):
return manet.IP4Loopback.Encapsulate(rest)
case first.Equal(manet.IP6Unspecified):
return manet.IP6Loopback.Encapsulate(rest)
default:
return maddr // not ip
}
}
// printSwarmAddrs prints the addresses of the host
func printSwarmAddrs(node *core.IpfsNode) {
if !node.IsOnline {
fmt.Println("Swarm not listening, running in offline mode.")
return
}
ifaceAddrs, err := node.PeerHost.Network().InterfaceListenAddresses()
if err != nil {
log.Errorf("failed to read listening addresses: %s", err)
}
lisAddrs := make([]string, len(ifaceAddrs))
for i, addr := range ifaceAddrs {
lisAddrs[i] = addr.String()
}
sort.Strings(lisAddrs)
for _, addr := range lisAddrs {
fmt.Printf("Swarm listening on %s\n", addr)
}
nodePhostAddrs := node.PeerHost.Addrs()
addrs := make([]string, len(nodePhostAddrs))
for i, addr := range nodePhostAddrs {
addrs[i] = addr.String()
}
sort.Strings(addrs)
for _, addr := range addrs {
fmt.Printf("Swarm announcing %s\n", addr)
}
}
// serveHTTPGateway collects options, creates listener, prints status message and starts serving requests
func serveHTTPGateway(req *cmds.Request, cctx *oldcmds.Context) (<-chan error, error) {
cfg, err := cctx.GetConfig()
if err != nil {
return nil, fmt.Errorf("serveHTTPGateway: GetConfig() failed: %s", err)
}
writable, writableOptionFound := req.Options[writableKwd].(bool)
if !writableOptionFound {
writable = cfg.Gateway.Writable.WithDefault(false)
}
if writable {
log.Error("serveHTTPGateway: legacy Gateway.Writable is DEPRECATED and will be removed or changed in future versions. If you are still using this, provide feedback in https://github.com/ipfs/specs/issues/375")
}
listeners, err := sockets.TakeListeners("io.ipfs.gateway")
if err != nil {
return nil, fmt.Errorf("serveHTTPGateway: socket activation failed: %s", err)
}
listenerAddrs := make(map[string]bool, len(listeners))
for _, listener := range listeners {
listenerAddrs[string(listener.Multiaddr().Bytes())] = true
}
gatewayAddrs := cfg.Addresses.Gateway
for _, addr := range gatewayAddrs {
gatewayMaddr, err := ma.NewMultiaddr(addr)
if err != nil {
return nil, fmt.Errorf("serveHTTPGateway: invalid gateway address: %q (err: %s)", addr, err)
}
if listenerAddrs[string(gatewayMaddr.Bytes())] {
continue
}
gwLis, err := manet.Listen(gatewayMaddr)
if err != nil {
return nil, fmt.Errorf("serveHTTPGateway: manet.Listen(%s) failed: %s", gatewayMaddr, err)
}
listenerAddrs[string(gatewayMaddr.Bytes())] = true
listeners = append(listeners, gwLis)
}
// we might have listened to /tcp/0 - let's see what we are listening on
gwType := "readonly"
if writable {
gwType = "writable"
}
for _, listener := range listeners {
fmt.Printf("Gateway (%s) server listening on %s\n", gwType, listener.Multiaddr())
}
cmdctx := *cctx
cmdctx.Gateway = true
var opts = []corehttp.ServeOption{
corehttp.MetricsCollectionOption("gateway"),
corehttp.HostnameOption(),
corehttp.GatewayOption(writable, "/ipfs", "/ipns"),
corehttp.VersionOption(),
corehttp.CheckVersionOption(),
corehttp.CommandsROOption(cmdctx),
}
if cfg.Experimental.P2pHttpProxy {
opts = append(opts, corehttp.P2PProxyOption())
}
if len(cfg.Gateway.RootRedirect) > 0 {
opts = append(opts, corehttp.RedirectOption("", cfg.Gateway.RootRedirect))
}
if len(cfg.Gateway.PathPrefixes) > 0 {
log.Fatal("Support for custom Gateway.PathPrefixes was removed: https://github.com/ipfs/go-ipfs/issues/7702")
}
node, err := cctx.ConstructNode()
if err != nil {
return nil, fmt.Errorf("serveHTTPGateway: ConstructNode() failed: %s", err)
}
if len(listeners) > 0 {
addr, err := manet.ToNetAddr(rewriteMaddrToUseLocalhostIfItsAny(listeners[0].Multiaddr()))
if err != nil {
return nil, fmt.Errorf("serveHTTPGateway: manet.ToNetAddr() failed: %w", err)
}
if err := node.Repo.SetGatewayAddr(addr); err != nil {
return nil, fmt.Errorf("serveHTTPGateway: SetGatewayAddr() failed: %w", err)
}
}
errc := make(chan error)
var wg sync.WaitGroup
for _, lis := range listeners {
wg.Add(1)
go func(lis manet.Listener) {
defer wg.Done()
errc <- corehttp.Serve(node, manet.NetListener(lis), opts...)
}(lis)
}
go func() {
wg.Wait()
close(errc)
}()
return errc, nil
}
// mountFuse collects options and opens the FUSE mountpoints for IPFS and IPNS
func mountFuse(req *cmds.Request, cctx *oldcmds.Context) error {
cfg, err := cctx.GetConfig()
if err != nil {
return fmt.Errorf("mountFuse: GetConfig() failed: %s", err)
}
fsdir, found := req.Options[ipfsMountKwd].(string)
if !found {
fsdir = cfg.Mounts.IPFS
}
nsdir, found := req.Options[ipnsMountKwd].(string)
if !found {
nsdir = cfg.Mounts.IPNS
}
node, err := cctx.ConstructNode()
if err != nil {
return fmt.Errorf("mountFuse: ConstructNode() failed: %s", err)
}
err = nodeMount.Mount(node, fsdir, nsdir)
if err != nil {
return err
}
fmt.Printf("IPFS mounted at: %s\n", fsdir)
fmt.Printf("IPNS mounted at: %s\n", nsdir)
return nil
}
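// maybeRunGC starts periodic repo garbage collection in the background when
// the --enable-gc flag is set. It returns a channel that yields the final GC
// error, or a nil channel when GC is disabled.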
func maybeRunGC(req *cmds.Request, node *core.IpfsNode) (<-chan error, error) {
enableGC, _ := req.Options[enableGCKwd].(bool)
if !enableGC {
return nil, nil
}
errc := make(chan error)
go func() {
errc <- corerepo.PeriodicGC(req.Context, node)
close(errc)
}()
return errc, nil
}
// merge does fan-in of multiple read-only error channels
// taken from http://blog.golang.org/pipelines
func merge(cs ...<-chan error) <-chan error {
var wg sync.WaitGroup
out := make(chan error)
// Start an output goroutine for each input channel in cs. output
// copies values from c to out until c is closed, then calls wg.Done.
output := func(c <-chan error) {
for n := range c {
out <- n
}
wg.Done()
}
for _, c := range cs {
if c != nil {
wg.Add(1)
go output(c)
}
}
// Start a goroutine to close out once all the output goroutines are
// done. This must start after the wg.Add call.
go func() {
wg.Wait()
close(out)
}()
return out
}
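// YesNoPrompt prints the prompt and reads an answer from stdin, retrying up
// to three times. 'y'/'Y' returns true; 'n'/'N' or an empty answer returns
// false; after three unrecognized answers it defaults to false.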
func YesNoPrompt(prompt string) bool {
var s string
for i := 0; i < 3; i++ {
fmt.Printf("%s ", prompt)
fmt.Scanf("%s", &s)
switch s {
case "y", "Y":
return true
case "n", "N":
return false
case "":
return false
}
fmt.Println("Please press either 'y' or 'n'")
}
return false
}
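// printVersion prints the Kubo, repo, system (arch/OS) and Go runtime versions.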
func printVersion() {
v := version.CurrentVersionNumber
if version.CurrentCommit != "" {
v += "-" + version.CurrentCommit
}
fmt.Printf("Kubo version: %s\n", v)
fmt.Printf("Repo version: %d\n", fsrepo.RepoVersion)
fmt.Printf("System version: %s\n", runtime.GOARCH+"/"+runtime.GOOS)
fmt.Printf("Golang version: %s\n", runtime.Version())
}