Skip to content

Commit df4d122

Browse files
author
Brian Tiger Chow
committed
refactor(core): Close in teardown
This declarative style is simpler to compose than the imperative wiring up of objects.
1 parent f303f41 commit df4d122

File tree

1 file changed

+38
-20
lines changed

1 file changed

+38
-20
lines changed

core/core.go

+38-20
Original file line number | Diff line number | Diff line change
@@ -2,6 +2,7 @@ package core
22

33
import (
44
"fmt"
5+
"io"
56

67
context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context"
78
b58 "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-base58"
@@ -97,11 +98,22 @@ type Mounts struct {
9798

9899
type ConfigOption func(ctx context.Context) (*IpfsNode, error)
99100

100-
func NewIPFSNode(ctx context.Context, option ConfigOption) (*IpfsNode, error) {
101+
func NewIPFSNode(parent context.Context, option ConfigOption) (*IpfsNode, error) {
102+
ctxg := ctxgroup.WithContext(parent)
103+
ctx := ctxg.Context()
104+
success := false // flip to true after all sub-system inits succeed
105+
defer func() {
106+
if !success {
107+
ctxg.Close()
108+
}
109+
}()
110+
101111
node, err := option(ctx)
102112
if err != nil {
103113
return nil, err
104114
}
115+
node.ContextGroup = ctxg
116+
ctxg.SetTeardown(node.teardown)
105117

106118
// Need to make sure it's perfectly clear 1) which variables are expected
107119
// to be initialized at this point, and 2) which variables will be
@@ -120,6 +132,7 @@ func NewIPFSNode(ctx context.Context, option ConfigOption) (*IpfsNode, error) {
120132
node.Pinning = pin.NewPinner(node.Repo.Datastore(), node.DAG)
121133
}
122134
node.Resolver = &path.Resolver{DAG: node.DAG}
135+
success = true
123136
return node, nil
124137
}
125138

@@ -135,13 +148,6 @@ func Online(r repo.Repo) ConfigOption {
135148
func Standard(r repo.Repo, online bool) ConfigOption {
136149
return func(ctx context.Context) (n *IpfsNode, err error) {
137150

138-
success := false // flip to true after all sub-system inits succeed
139-
defer func() {
140-
if !success && n != nil {
141-
n.Close()
142-
}
143-
}()
144-
145151
if r == nil {
146152
return nil, debugerror.Errorf("repo required")
147153
}
@@ -155,9 +161,6 @@ func Standard(r repo.Repo, online bool) ConfigOption {
155161
Repo: r,
156162
}
157163

158-
n.ContextGroup = ctxgroup.WithContextAndTeardown(ctx, n.teardown)
159-
ctx = n.ContextGroup.Context()
160-
161164
// setup Peerstore
162165
n.Peerstore = peer.NewPeerstore()
163166

@@ -180,7 +183,6 @@ func Standard(r repo.Repo, online bool) ConfigOption {
180183
n.Exchange = offline.Exchange(n.Blockstore)
181184
}
182185

183-
success = true
184186
return n, nil
185187
}
186188
}
@@ -197,7 +199,7 @@ func (n *IpfsNode) StartOnlineServices() error {
197199
return err
198200
}
199201

200-
peerhost, err := constructPeerHost(ctx, n.ContextGroup, n.Repo.Config(), n.Identity, n.Peerstore)
202+
peerhost, err := constructPeerHost(ctx, n.Repo.Config(), n.Identity, n.Peerstore)
201203
if err != nil {
202204
return debugerror.Wrap(err)
203205
}
@@ -207,7 +209,7 @@ func (n *IpfsNode) StartOnlineServices() error {
207209
n.Diagnostics = diag.NewDiagnostics(n.Identity, n.PeerHost)
208210

209211
// setup routing service
210-
dhtRouting, err := constructDHTRouting(ctx, n.ContextGroup, n.PeerHost, n.Repo.Datastore())
212+
dhtRouting, err := constructDHTRouting(ctx, n.PeerHost, n.Repo.Datastore())
211213
if err != nil {
212214
return debugerror.Wrap(err)
213215
}
@@ -243,9 +245,27 @@ func (n *IpfsNode) StartOnlineServices() error {
243245
return nil
244246
}
245247

248+
// teardown closes children
246249
func (n *IpfsNode) teardown() error {
247-
if err := n.Repo.Close(); err != nil {
248-
return err
250+
var errs []error
251+
closers := []io.Closer{
252+
n.Repo,
253+
}
254+
if n.DHT != nil {
255+
closers = append(closers, n.DHT)
256+
}
257+
if n.PeerHost != nil {
258+
closers = append(closers, n.PeerHost)
259+
}
260+
for _, closer := range closers {
261+
if closer != nil {
262+
if err := closer.Close(); err != nil {
263+
errs = append(errs, err)
264+
}
265+
}
266+
}
267+
if len(errs) > 0 {
268+
return errs[0]
249269
}
250270
return nil
251271
}
@@ -344,7 +364,7 @@ func listenAddresses(cfg *config.Config) ([]ma.Multiaddr, error) {
344364
}
345365

346366
// isolates the complex initialization steps
347-
func constructPeerHost(ctx context.Context, ctxg ctxgroup.ContextGroup, cfg *config.Config, id peer.ID, ps peer.Peerstore) (p2phost.Host, error) {
367+
func constructPeerHost(ctx context.Context, cfg *config.Config, id peer.ID, ps peer.Peerstore) (p2phost.Host, error) {
348368
listenAddrs, err := listenAddresses(cfg)
349369
if err != nil {
350370
return nil, debugerror.Wrap(err)
@@ -362,7 +382,6 @@ func constructPeerHost(ctx context.Context, ctxg ctxgroup.ContextGroup, cfg *con
362382
if err != nil {
363383
return nil, debugerror.Wrap(err)
364384
}
365-
ctxg.AddChildGroup(network.CtxGroup())
366385

367386
peerhost := p2pbhost.New(network)
368387
// explicitly set these as our listen addrs.
@@ -377,9 +396,8 @@ func constructPeerHost(ctx context.Context, ctxg ctxgroup.ContextGroup, cfg *con
377396
return peerhost, nil
378397
}
379398

380-
func constructDHTRouting(ctx context.Context, ctxg ctxgroup.ContextGroup, host p2phost.Host, ds datastore.ThreadSafeDatastore) (*dht.IpfsDHT, error) {
399+
func constructDHTRouting(ctx context.Context, host p2phost.Host, ds datastore.ThreadSafeDatastore) (*dht.IpfsDHT, error) {
381400
dhtRouting := dht.NewDHT(ctx, host, ds)
382401
dhtRouting.Validators[IpnsValidatorTag] = namesys.ValidateIpnsRecord
383-
ctxg.AddChildGroup(dhtRouting)
384402
return dhtRouting, nil
385403
}

0 commit comments

Comments (0)