@@ -28,17 +28,32 @@ logger.prefix = path.basename(module.filename, '.js');
 
 var RETRY_BUCKETS = config.retryBuckets;
 var BUCKET_PREFIX = 'bucket';
-var FIRST_CHARCODE = "A".charCodeAt(0); //Needed to auto-create bucket names (bucketA, bucketB,...)
+var FIRST_CHARCODE = 'A'.charCodeAt(0); //Needed to auto-create bucket names (bucketA, bucketB,...)
 
 var stopped;
 
-var lrangeAndDelete = "\
+var getTasksAndRepush = "\
     local bucketName = KEYS[1]\n\
+    local serviceQueue = KEYS[2]\n\
     local tasks = redis.call('lrange', bucketName, 0, -1)\n\
-    redis.call('del', bucketName)\n\
+    local size = table.getn(tasks)\n\
+    if size > 0 then\n\
+        redis.call('lpush', serviceQueue, unpack(tasks))\n\
+        redis.call('del', bucketName)\n\
+    end\n\
     return tasks\n\
     ";
 
+/**
+ * Get bucket name
+ * @param index Position on buckets array
+ * @param service Service name
+ * @returns {string} The name of the bucket
+ */
+function getBucketName(index, service) {
+    return BUCKET_PREFIX + String.fromCharCode(FIRST_CHARCODE + index) + ':' + service;
+}
+
 /**
  *
  * @param task The task to be pushed on the bucket
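The new getBucketName helper scopes each retry bucket to a service by appending the service name, so services no longer share bucket keys; with one letter per entry in RETRY_BUCKETS, index 0 maps to bucketA, index 1 to bucketB, and so on. A minimal standalone sketch of the naming scheme (the service name 'notifications' is hypothetical, used only for illustration):

```js
// Standalone sketch of the bucket-naming convention introduced above.
var BUCKET_PREFIX = 'bucket';
var FIRST_CHARCODE = 'A'.charCodeAt(0);

function getBucketName(index, service) {
    return BUCKET_PREFIX + String.fromCharCode(FIRST_CHARCODE + index) + ':' + service;
}

// 'notifications' is a hypothetical service name.
console.log(getBucketName(0, 'notifications')); // bucketA:notifications
console.log(getBucketName(2, 'notifications')); // bucketC:notifications
```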
@@ -62,7 +77,7 @@ exports.insertOnBucket = function (task, error, maxRetry, callback) {
 
     } else {
 
-        var bucketName = BUCKET_PREFIX + String.fromCharCode(FIRST_CHARCODE + retried);
+        var bucketName = getBucketName(retried, task.service);
         var bucketTime = RETRY_BUCKETS[retried];
         var db = dbCluster.getDb(bucketName);
 
@@ -98,105 +113,62 @@ exports.insertOnBucket = function (task, error, maxRetry, callback) {
 
 exports.initBucketTimers = function () {
 
-    function repushBucketTasks(bucketName, cb) {
+    function repushBucketTasks(bucketName, serviceQueue, cb) {
 
         var db = dbCluster.getDb(bucketName);
 
         //Get all tasks from the bucket
-        db.eval(lrangeAndDelete, 1, bucketName, function (errEval, data) {
-
+        db.eval(getTasksAndRepush, 2, bucketName, serviceQueue, function (errEval, data) {
             if (!errEval && data.length > 0) {
-
-                //Insert on the queue again (using the standard method: as listener)
-                async.map(data,
-
-                    //Iterate function
-                    function (task, callback) {
-                        router.route(JSON.parse(task), callback);
-                    },
-
-                    //Function to be called when all items of the array has been processed
-                    function (err, results) {
-
-                        if (err) {
-
-                            logger.error('Error routing tasks to be respushed', { op: 'REPUSH BUCKET TASKS', error: err });
-                            cb();
-
-                        } else {
-
-                            //Get tasks for each service
-                            var putArguments = {};
-                            for (var i = 0; i < results.length; i++) {
-                                var service = results[i].service;
-                                var task = results[i].task;
-                                if (!putArguments[service]) {
-                                    putArguments[service] = [];
-                                }
-                                putArguments[service].push(task);
-                            }
-
-                            //For each service, repush all the tasks at the same time
-                            //In order to improve efficiency, only up to 100 tasks will be repushed at the same time
-                            var putFunctions = [];
-                            for (service in putArguments) {
-                                if (putArguments.hasOwnProperty(service)) {
-                                    var concurrentPushes = MG.CONCURRENT_PUSHES;
-                                    for (i = 0; i < putArguments[service].length; i += concurrentPushes) {
-                                        putFunctions.push(store.put.bind(this, service,
-                                            putArguments[service].slice(i, i + concurrentPushes)));
-                                    }
-                                }
-                            }
-
-                            async.series(putFunctions, cb);
-                        }
-                    }
-                );
-
+                logger.info('Tasks repushed', { op: 'REPUSH BUCKET TASKS', tasks: data });
+                cb();
             } else if (errEval) {
-
                 logger.error('Error getting bucket elements', { op: 'RESPUSH BUCKET TASKS', error: errEval });
                 cb();
-
             } else if (data.length === 0) {
-
                 logger.debug('Empty bucket', { op: 'REPUSH BUCKET TASKS' });
                 cb();
-
             }
         });
     }
 
+    //Stop condition
     stopped = false;
 
-    RETRY_BUCKETS.forEach(function (bucket, index) {
+    var services = router.getQueues();
 
-        var bucketName = BUCKET_PREFIX + String.fromCharCode(FIRST_CHARCODE + index);
-        var intervalTime = bucket * 2 * 1000;
+    for (var service in services) {
+        if (services.hasOwnProperty(service)) {
 
-        async.whilst(
+            RETRY_BUCKETS.forEach(function (bucketTimeout, index) {
 
-            //Stop condition
-            function () {
-                return stopped === false;
-            },
+                var serviceName = services[service];
+                var bucketName = getBucketName(index, serviceName);
+                var intervalTime = bucketTimeout * 2 * 1000;
 
-            //Function to be executed forever
-            function (callback) {
-                repushBucketTasks(bucketName, function () {
-                    setTimeout(callback, intervalTime);
-                })
-            },
+                async.whilst(
 
-            //Function to be called on error
-            function (err) {
-                logger.error('Error processing respush', { op: 'BUCKET TIMER', error: err });
-            }
-        );
+                    //Stop condition
+                    function () {
+                        return stopped === false;
+                    },
 
-    });
+                    //Function to be executed forever
+                    function (callback) {
+                        repushBucketTasks(bucketName, serviceName, function () {
+                            setTimeout(callback, intervalTime);
+                        })
+                    },
 
+                    //Function to be called on error
+                    function (err) {
+                        logger.error('Error processing respush', { op: 'BUCKET TIMER', error: err });
+                    }
+                );
+
+            });
+        }
+    }
 };
 
 exports.stopTimers = function () {
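The rewritten repush path delegates the whole drain-and-requeue step to a single EVAL, so reading the bucket, LPUSHing its tasks onto the service queue, and deleting the bucket happen atomically inside Redis. A rough sketch of exercising the same script in isolation, assuming a plain node_redis client and hypothetical key names (the project itself obtains its clients through dbCluster):

```js
var redis = require('redis');
var client = redis.createClient(); // assumes a local Redis instance

// Same body as getTasksAndRepush above: drain the bucket into the
// service queue and delete the bucket in one atomic EVAL.
var script = "local bucketName = KEYS[1]\n" +
    "local serviceQueue = KEYS[2]\n" +
    "local tasks = redis.call('lrange', bucketName, 0, -1)\n" +
    "local size = table.getn(tasks)\n" +
    "if size > 0 then\n" +
    "    redis.call('lpush', serviceQueue, unpack(tasks))\n" +
    "    redis.call('del', bucketName)\n" +
    "end\n" +
    "return tasks";

// Hypothetical keys following the bucketLetter:service convention.
client.eval(script, 2, 'bucketA:notifications', 'notifications', function (err, tasks) {
    if (err) {
        console.error('eval failed', err);
    } else {
        console.log('repushed %d task(s)', tasks.length);
    }
    client.quit();
});
```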