@@ -54,7 +54,7 @@ type IJobMgr interface {
 	//Throughput() XferThroughput
 	// If existingPlanMMF is nil, a new MMF is opened.
 	AddJobPart(partNum PartNumber, planFile JobPartPlanFileName, existingPlanMMF *JobPartPlanMMF, sourceSAS string,
-		destinationSAS string, scheduleTransfers bool) IJobPartMgr
+		destinationSAS string, scheduleTransfers bool, completionChan chan struct{}) IJobPartMgr
 	SetIncludeExclude(map[string]int, map[string]int)
 	IncludeExclude() (map[string]int, map[string]int)
 	ResumeTransfers(appCtx context.Context)
@@ -97,14 +97,15 @@ type IJobMgr interface {
 	AddSuccessfulBytesInActiveFiles(n int64)
 	SuccessfulBytesInActiveFiles() uint64
 	CancelPauseJobOrder(desiredJobStatus common.JobStatus) common.CancelPauseResumeResponse
+	IsDaemon() bool
 }
 
 ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
 
 func NewJobMgr(concurrency ConcurrencySettings, jobID common.JobID, appCtx context.Context, cpuMon common.CPUMonitor, level common.LogLevel,
 	commandString string, logFileFolder string, tuner ConcurrencyTuner,
 	pacer PacerAdmin, slicePool common.ByteSlicePooler, cacheLimiter common.CacheLimiter, fileCountLimiter common.CacheLimiter,
-	jobLogger common.ILoggerResetable) IJobMgr {
+	jobLogger common.ILoggerResetable, daemonMode bool) IJobMgr {
 	const channelSize = 100000
 	// PartsChannelSize defines the number of JobParts which can be placed into the
 	// parts channel. Any JobPart which comes from FE and partChannel is full,
@@ -128,6 +129,7 @@ func NewJobMgr(concurrency ConcurrencySettings, jobID common.JobID, appCtx conte
 
 	/* Create book-keeping channels */
 	jobPartProgressCh := make(chan jobPartProgressInfo)
+	var jstm jobStatusManager
 	jstm.respChan = make(chan common.ListJobSummaryResponse)
 	jstm.listReq = make(chan bool)
 	jstm.partCreated = make(chan JobPartCreatedMsg, 100)
@@ -140,7 +142,6 @@ func NewJobMgr(concurrency ConcurrencySettings, jobID common.JobID, appCtx conte
 		concurrency:          concurrency,
 		overwritePrompter:    newOverwritePrompter(),
 		pipelineNetworkStats: newPipelineNetworkStats(tuner), // let the stats coordinate with the concurrency tuner
-		exclusiveDestinationMapHolder: &atomic.Value{},
 		initMu:          &sync.Mutex{},
 		jobPartProgress: jobPartProgressCh,
 		coordinatorChannels: CoordinatorChannels{
@@ -167,6 +168,8 @@ func NewJobMgr(concurrency ConcurrencySettings, jobID common.JobID, appCtx conte
 		cacheLimiter:     cacheLimiter,
 		fileCountLimiter: fileCountLimiter,
 		cpuMon:           cpuMon,
+		jstm:             &jstm,
+		isDaemon:         daemonMode,
 		/*Other fields remain zero-value until this job is scheduled */}
 	jm.reset(appCtx, commandString)
 	jm.logJobsAdminMessages()
@@ -249,6 +252,7 @@ type jobMgrInitState struct {
 	securityInfoPersistenceManager *securityInfoPersistenceManager
 	folderCreationTracker          FolderCreationTracker
 	folderDeletionManager          common.FolderDeletionManager
+	exclusiveDestinationMapHolder  *atomic.Value
 }
 
 // jobMgr represents the runtime information for a Job
@@ -274,7 +278,6 @@ type jobMgr struct {
 	cancel               context.CancelFunc
 	pipelineNetworkStats *PipelineNetworkStats
 
-	exclusiveDestinationMapHolder *atomic.Value
 
 	// Share the same HTTP Client across all job parts, so that the we maximize re-use of
 	// its internal connection pool
@@ -311,10 +314,13 @@ type jobMgr struct {
 	slicePool        common.ByteSlicePooler
 	cacheLimiter     common.CacheLimiter
 	fileCountLimiter common.CacheLimiter
+	jstm             *jobStatusManager
 
 	/* Pool sizer related stuff */
 	atomicCurrentMainPoolSize          int32 // align 64 bit integers for 32 bit arch
 	atomicSuccessfulBytesInActiveFiles int64
+
+	isDaemon bool /* is it running as service */
 }
 
 ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
@@ -395,12 +401,14 @@ func (jm *jobMgr) logPerfInfo(displayStrings []string, constraint common.PerfCon
 
 // initializeJobPartPlanInfo func initializes the JobPartPlanInfo handler for given JobPartOrder
 func (jm *jobMgr) AddJobPart(partNum PartNumber, planFile JobPartPlanFileName, existingPlanMMF *JobPartPlanMMF, sourceSAS string,
-	destinationSAS string, scheduleTransfers bool) IJobPartMgr {
+	destinationSAS string, scheduleTransfers bool, completionChan chan struct{}) IJobPartMgr {
 	jpm := &jobPartMgr{jobMgr: jm, filename: planFile, sourceSAS: sourceSAS,
 		destinationSAS: destinationSAS, pacer: jm.pacer,
 		slicePool:      jm.slicePool,
 		cacheLimiter:   jm.cacheLimiter,
-		fileCountLimiter: jm.fileCountLimiter}
+		fileCountLimiter:  jm.fileCountLimiter,
+		closeOnCompletion: completionChan,
+	}
 	// If an existing plan MMF was supplied, re use it. Otherwise, init a new one.
 	if existingPlanMMF == nil {
 		jpm.planMMF = jpm.filename.Map()
@@ -411,7 +419,6 @@ func (jm *jobMgr) AddJobPart(partNum PartNumber, planFile JobPartPlanFileName, e
 	jm.jobPartMgrs.Set(partNum, jpm)
 	jm.setFinalPartOrdered(partNum, jpm.planMMF.Plan().IsFinalPart)
 	jm.setDirection(jpm.Plan().FromTo)
-	jpm.exclusiveDestinationMap = jm.getExclusiveDestinationMap(partNum, jpm.Plan().FromTo)
 
 	jm.initMu.Lock()
 	defer jm.initMu.Unlock()
@@ -421,9 +428,12 @@ func (jm *jobMgr) AddJobPart(partNum PartNumber, planFile JobPartPlanFileName, e
 			securityInfoPersistenceManager: newSecurityInfoPersistenceManager(jm.ctx),
 			folderCreationTracker:          NewFolderCreationTracker(jpm.Plan().Fpo, jpm.Plan()),
 			folderDeletionManager:          common.NewFolderDeletionManager(jm.ctx, jpm.Plan().Fpo, logger),
+			exclusiveDestinationMapHolder:  &atomic.Value{},
 		}
+		jm.initState.exclusiveDestinationMapHolder.Store(common.NewExclusiveStringMap(jpm.Plan().FromTo, runtime.GOOS))
 	}
 	jpm.jobMgrInitState = jm.initState // so jpm can use it as much as desired without locking (since the only mutation is the init in jobManager. As far as jobPartManager is concerned, the init state is read-only
+	jpm.exclusiveDestinationMap = jm.getExclusiveDestinationMap(partNum, jpm.Plan().FromTo)
 
 	if scheduleTransfers {
 		// If the schedule transfer is set to true
@@ -455,7 +465,6 @@ func (jm *jobMgr) AddJobOrder(order common.CopyJobPartOrderRequest) IJobPartMgr
 	jm.jobPartMgrs.Set(order.PartNum, jpm)
 	jm.setFinalPartOrdered(order.PartNum, jpm.planMMF.Plan().IsFinalPart)
 	jm.setDirection(jpm.Plan().FromTo)
-	jpm.exclusiveDestinationMap = jm.getExclusiveDestinationMap(order.PartNum, jpm.Plan().FromTo)
 
 	jm.initMu.Lock()
 	defer jm.initMu.Unlock()
@@ -465,9 +474,12 @@ func (jm *jobMgr) AddJobOrder(order common.CopyJobPartOrderRequest) IJobPartMgr
 			securityInfoPersistenceManager: newSecurityInfoPersistenceManager(jm.ctx),
 			folderCreationTracker:          NewFolderCreationTracker(jpm.Plan().Fpo, jpm.Plan()),
 			folderDeletionManager:          common.NewFolderDeletionManager(jm.ctx, jpm.Plan().Fpo, logger),
+			exclusiveDestinationMapHolder:  &atomic.Value{},
 		}
+		jm.initState.exclusiveDestinationMapHolder.Store(common.NewExclusiveStringMap(jpm.Plan().FromTo, runtime.GOOS))
 	}
 	jpm.jobMgrInitState = jm.initState // so jpm can use it as much as desired without locking (since the only mutation is the init in jobManager. As far as jobPartManager is concerned, the init state is read-only
+	jpm.exclusiveDestinationMap = jm.getExclusiveDestinationMap(order.PartNum, jpm.Plan().FromTo)
 
 	jm.QueueJobParts(jpm)
 	return jpm
@@ -515,11 +527,7 @@ func (jm *jobMgr) setDirection(fromTo common.FromTo) {
 
 // can't do this at time of constructing the jobManager, because it doesn't know fromTo at that time
 func (jm *jobMgr) getExclusiveDestinationMap(partNum PartNumber, fromTo common.FromTo) *common.ExclusiveStringMap {
-	// assume that first part is ordered before any others
-	if partNum == 0 {
-		jm.exclusiveDestinationMapHolder.Store(common.NewExclusiveStringMap(fromTo, runtime.GOOS))
-	}
-	return jm.exclusiveDestinationMapHolder.Load().(*common.ExclusiveStringMap)
+	return jm.initState.exclusiveDestinationMapHolder.Load().(*common.ExclusiveStringMap)
 }
 
 func (jm *jobMgr) HttpClient() *http.Client {
@@ -592,6 +600,10 @@ func (jm *jobMgr) reportJobPartDoneHandler() {
 		jobProgressInfo.transfersSkipped += partProgressInfo.transfersSkipped
 		jobProgressInfo.transfersFailed += partProgressInfo.transfersFailed
 
+		if partProgressInfo.completionChan != nil {
+			close(partProgressInfo.completionChan)
+		}
+
 		// If the last part is still awaited or other parts all still not complete,
 		// JobPart 0 status is not changed (unless we are cancelling)
 		haveFinalPart = atomic.LoadInt32(&jm.atomicFinalPartOrderedIndicator) == 1
@@ -1002,7 +1014,10 @@ func (jm *jobMgr) CancelPauseJobOrder(desiredJobStatus common.JobStatus) common.
 	}
 	return jr
 }
-/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+func (jm *jobMgr) IsDaemon() bool {
+	return jm.isDaemon
+}
 
 ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
 
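For context, a minimal caller-side sketch of how the new completionChan parameter might be used. This is not part of the change itself; the helper name waitForPart and the planFile/srcSAS/dstSAS variables are illustrative assumptions. The diff closes the channel in reportJobPartDoneHandler once every transfer in the part has been accounted for, so a caller can simply block on a receive.

```go
// waitForPart is a hypothetical helper (same package) showing the intended
// pattern: pass a channel to AddJobPart and wait for it to be closed when the
// part finishes. Because the channel is closed rather than sent on, any number
// of goroutines may wait on it.
func waitForPart(jm IJobMgr, planFile JobPartPlanFileName, srcSAS, dstSAS string) {
	done := make(chan struct{})

	// Schedule part 0 and ask for its transfers to be scheduled immediately.
	jm.AddJobPart(0, planFile, nil, srcSAS, dstSAS, true, done)

	// Block until reportJobPartDoneHandler closes the channel for this part.
	<-done
}
```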