@@ -20,7 +20,6 @@ import (
	ds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore"
	ma "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multiaddr"
	goprocess "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/goprocess"
-	goprocessctx "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/goprocess/context"
	mamask "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/whyrusleeping/multiaddr-filter"
	context "github.com/ipfs/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context"
	diag "github.com/ipfs/go-ipfs/diagnostics"
@@ -46,7 +45,6 @@ import (
	exchange "github.com/ipfs/go-ipfs/exchange"
	bitswap "github.com/ipfs/go-ipfs/exchange/bitswap"
	bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network"
-	offline "github.com/ipfs/go-ipfs/exchange/offline"
	rp "github.com/ipfs/go-ipfs/exchange/reprovide"

	mount "github.com/ipfs/go-ipfs/fuse/mount"
@@ -123,124 +121,6 @@ type Mounts struct {
	Ipns mount.Mount
}

-type ConfigOption func(ctx context.Context) (*IpfsNode, error)
-
-func NewIPFSNode(ctx context.Context, option ConfigOption) (*IpfsNode, error) {
-	node, err := option(ctx)
-	if err != nil {
-		return nil, err
-	}
-
-	if node.ctx == nil {
-		node.ctx = ctx
-	}
-	if node.proc == nil {
-		node.proc = goprocessctx.WithContextAndTeardown(node.ctx, node.teardown)
-	}
-
-	success := false // flip to true after all sub-system inits succeed
-	defer func() {
-		if !success {
-			node.proc.Close()
-		}
-	}()
-
-	// Need to make sure it's perfectly clear 1) which variables are expected
-	// to be initialized at this point, and 2) which variables will be
-	// initialized after this point.
-
-	node.Blocks = bserv.New(node.Blockstore, node.Exchange)
-
-	if node.Peerstore == nil {
-		node.Peerstore = peer.NewPeerstore()
-	}
-	node.DAG = merkledag.NewDAGService(node.Blocks)
-	node.Pinning, err = pin.LoadPinner(node.Repo.Datastore(), node.DAG)
-	if err != nil {
-		node.Pinning = pin.NewPinner(node.Repo.Datastore(), node.DAG)
-	}
-	node.Resolver = &path.Resolver{DAG: node.DAG}
-
-	success = true
-	return node, nil
-}
-
-func Offline(r repo.Repo) ConfigOption {
-	return Standard(r, false)
-}
-
-func OnlineWithOptions(r repo.Repo, router RoutingOption, ho HostOption) ConfigOption {
-	return standardWithRouting(r, true, router, ho)
-}
-
-func Online(r repo.Repo) ConfigOption {
-	return Standard(r, true)
-}
-
-// DEPRECATED: use Online, Offline functions
-func Standard(r repo.Repo, online bool) ConfigOption {
-	return standardWithRouting(r, online, DHTOption, DefaultHostOption)
-}
-
-// TODO refactor so maybeRouter isn't special-cased in this way
-func standardWithRouting(r repo.Repo, online bool, routingOption RoutingOption, hostOption HostOption) ConfigOption {
-	return func(ctx context.Context) (n *IpfsNode, err error) {
-		// FIXME perform node construction in the main constructor so it isn't
-		// necessary to perform this teardown in this scope.
-		success := false
-		defer func() {
-			if !success && n != nil {
-				n.teardown()
-			}
-		}()
-
-		// TODO move as much of node initialization as possible into
-		// NewIPFSNode. The larger these config options are, the harder it is
-		// to test all node construction code paths.
-
-		if r == nil {
-			return nil, fmt.Errorf("repo required")
-		}
-		n = &IpfsNode{
-			mode: func() mode {
-				if online {
-					return onlineMode
-				}
-				return offlineMode
-			}(),
-			Repo: r,
-		}
-
-		n.ctx = ctx
-		n.proc = goprocessctx.WithContextAndTeardown(ctx, n.teardown)
-
-		// setup Peerstore
-		n.Peerstore = peer.NewPeerstore()
-
-		// setup local peer ID (private key is loaded in online setup)
-		if err := n.loadID(); err != nil {
-			return nil, err
-		}
-
-		n.Blockstore, err = bstore.WriteCached(bstore.NewBlockstore(n.Repo.Datastore()), kSizeBlockstoreWriteCache)
-		if err != nil {
-			return nil, err
-		}
-
-		if online {
-			do := setupDiscoveryOption(n.Repo.Config().Discovery)
-			if err := n.startOnlineServices(ctx, routingOption, hostOption, do); err != nil {
-				return nil, err
-			}
-		} else {
-			n.Exchange = offline.Exchange(n.Blockstore)
-		}
-
-		success = true
-		return n, nil
-	}
-}
-

func (n *IpfsNode) startOnlineServices(ctx context.Context, routingOption RoutingOption, hostOption HostOption, do DiscoveryOption) error {

	if n.PeerHost != nil { // already online.
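
For context, a minimal sketch of how the removed ConfigOption constructors were invoked before this change. It is based only on the deleted signatures above; the buildNode helper is hypothetical, and it assumes this package is imported as core with an already-open repo.Repo in hand:

```go
// Hypothetical helper, for illustration only: builds a node via the
// removed ConfigOption API (assumption: this package is imported as core).
// Online(r) wires up networking at construction time; Offline(r) skips it.
func buildNode(ctx context.Context, r repo.Repo) (*core.IpfsNode, error) {
	return core.NewIPFSNode(ctx, core.Online(r))
}
```
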
@@ -371,10 +251,13 @@ func (n *IpfsNode) teardown() error {
	// owned objects are closed in this teardown to ensure that they're closed
	// regardless of which constructor was used to add them to the node.
	closers := []io.Closer{
-		n.Exchange,
		n.Repo,
	}

+	if n.Exchange != nil {
+		closers = append(closers, n.Exchange)
+	}
+
	if n.Mounts.Ipfs != nil {
		closers = append(closers, mount.Closer(n.Mounts.Ipfs))
	}
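
Note on the teardown change above: after this patch, offline construction no longer installs an offline.Exchange, so n.Exchange can be a nil interface when teardown runs, and calling Close through a nil io.Closer panics. A standalone sketch (not from the patch) of the failure mode the new guard avoids:

```go
package main

import "io"

func main() {
	var exchange io.Closer // nil, like an Exchange that was never constructed
	closers := []io.Closer{exchange}
	for _, c := range closers {
		c.Close() // runtime panic: Close called on a nil interface value
	}
}
```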