package main

import (
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"io/fs"
	"math"
	"os"
	"strconv"
	"strings"
	"sync"
	"time"

	"github.com/reugn/go-quartz/logger"
	"github.com/reugn/go-quartz/quartz"
)

const dataFolder = "./store"
const fileMode fs.FileMode = 0744

func init() {
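	// use "_" as the separator in job and trigger descriptions; unmarshal below
	// splits the persisted descriptions on this character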
	quartz.Sep = "_"
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 31*time.Second)
	defer cancel()

	if _, err := os.Stat(dataFolder); os.IsNotExist(err) {
		if err := os.Mkdir(dataFolder, fileMode); err != nil {
			logger.Warnf("Failed to create data folder: %s", err)
			return
		}
	}

	logger.Info("Starting scheduler")
	jobQueue := newJobQueue()
	scheduler := quartz.NewStdSchedulerWithOptions(quartz.StdSchedulerOptions{
		OutdatedThreshold: time.Second, // considering file system I/O latency
	}, jobQueue)
	scheduler.Start(ctx)

	if jobQueue.Size() == 0 {
		logger.Info("Scheduling new jobs")
		jobDetail1 := quartz.NewJobDetail(&printJob{5}, quartz.NewJobKey("job1"))
		if err := scheduler.ScheduleJob(jobDetail1, quartz.NewSimpleTrigger(5*time.Second)); err != nil {
			logger.Warnf("Failed to schedule job %s: %s", jobDetail1.JobKey(), err)
		}
		jobDetail2 := quartz.NewJobDetail(&printJob{10}, quartz.NewJobKey("job2"))
		if err := scheduler.ScheduleJob(jobDetail2, quartz.NewSimpleTrigger(10*time.Second)); err != nil {
			logger.Warnf("Failed to schedule job %s: %s", jobDetail2.JobKey(), err)
		}
	} else {
		logger.Info("Job queue is not empty")
	}

	<-ctx.Done()

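	// the context has timed out; log the jobs that remain in the persistent queue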
	scheduledJobs := jobQueue.ScheduledJobs()
	jobNames := make([]string, 0, len(scheduledJobs))
	for _, job := range scheduledJobs {
		jobNames = append(jobNames, job.JobDetail().JobKey().String())
	}
	logger.Infof("Jobs in queue: %s", jobNames)
}

// printJob is a quartz.Job implementation that prints its configured value.
type printJob struct {
	seconds int
}

var _ quartz.Job = (*printJob)(nil)

func (job *printJob) Execute(_ context.Context) error {
	logger.Infof("PrintJob: %d\n", job.seconds)
	return nil
}
func (job *printJob) Description() string {
	return fmt.Sprintf("PrintJob%s%d", quartz.Sep, job.seconds)
}

// scheduledPrintJob implements the quartz.ScheduledJob interface.
type scheduledPrintJob struct {
	jobDetail   *quartz.JobDetail
	trigger     quartz.Trigger
	nextRunTime int64
}

// serializedJob is the JSON representation of a scheduled job persisted to disk.
type serializedJob struct {
	Job         string                   `json:"job"`
	JobKey      string                   `json:"job_key"`
	Options     *quartz.JobDetailOptions `json:"job_options"`
	Trigger     string                   `json:"trigger"`
	NextRunTime int64                    `json:"next_run_time"`
}

var _ quartz.ScheduledJob = (*scheduledPrintJob)(nil)

func (job *scheduledPrintJob) JobDetail() *quartz.JobDetail {
	return job.jobDetail
}
func (job *scheduledPrintJob) Trigger() quartz.Trigger {
	return job.trigger
}
func (job *scheduledPrintJob) NextRunTime() int64 {
	return job.nextRunTime
}

// marshal returns the JSON encoding of the job.
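// Only the string descriptions of the job and trigger are persisted, so unmarshal
// has to reconstruct the concrete types from them.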
func marshal(job quartz.ScheduledJob) ([]byte, error) {
	var serialized serializedJob
	serialized.Job = job.JobDetail().Job().Description()
	serialized.JobKey = job.JobDetail().JobKey().String()
	serialized.Options = job.JobDetail().Options()
	serialized.Trigger = job.Trigger().Description()
	serialized.NextRunTime = job.NextRunTime()
	return json.Marshal(serialized)
}

// unmarshal parses the JSON-encoded job.
func unmarshal(encoded []byte) (quartz.ScheduledJob, error) {
	var serialized serializedJob
	if err := json.Unmarshal(encoded, &serialized); err != nil {
		return nil, err
	}
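	// the job description has the form "PrintJob<Sep><seconds>", see printJob.Description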
	jobVals := strings.Split(serialized.Job, quartz.Sep)
	i, err := strconv.Atoi(jobVals[1])
	if err != nil {
		return nil, err
	}
	job := &printJob{i} // assuming we know the job type
	jobKeyVals := strings.Split(serialized.JobKey, quartz.Sep)
	jobKey := quartz.NewJobKeyWithGroup(jobKeyVals[1], jobKeyVals[0])
	jobDetail := quartz.NewJobDetailWithOptions(job, jobKey, serialized.Options)
	triggerOpts := strings.Split(serialized.Trigger, quartz.Sep)
	interval, err := time.ParseDuration(triggerOpts[1])
	if err != nil {
		return nil, err
	}
	trigger := quartz.NewSimpleTrigger(interval) // assuming we know the trigger type
	return &scheduledPrintJob{
		jobDetail:   jobDetail,
		trigger:     trigger,
		nextRunTime: serialized.NextRunTime,
	}, nil
}

// jobQueue implements the quartz.JobQueue interface, using the file system
// as the persistence layer.
type jobQueue struct {
	mtx sync.Mutex
}

var _ quartz.JobQueue = (*jobQueue)(nil)

// newJobQueue initializes and returns an empty jobQueue.
func newJobQueue() *jobQueue {
	return &jobQueue{}
}

// Push inserts a new scheduled job into the queue.
// This method is also used by the Scheduler to reschedule existing jobs that
// have been dequeued for execution.
func (jq *jobQueue) Push(job quartz.ScheduledJob) error {
	jq.mtx.Lock()
	defer jq.mtx.Unlock()
	logger.Tracef("Push: %s", job.JobDetail().JobKey())
	serialized, err := marshal(job)
	if err != nil {
		return err
	}
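	// each job is stored in its own file named after the job's next run time,
	// so the head of the queue is the file with the smallest numeric name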
	if err = os.WriteFile(fmt.Sprintf("%s/%d", dataFolder, job.NextRunTime()),
		serialized, fileMode); err != nil {
		logger.Errorf("Failed to write job: %s", err)
		return err
	}
	return nil
}

// Pop removes and returns the next scheduled job from the queue.
func (jq *jobQueue) Pop() (quartz.ScheduledJob, error) {
	jq.mtx.Lock()
	defer jq.mtx.Unlock()
	logger.Trace("Pop")
	job, err := findHead()
	if err != nil {
		logger.Errorf("Failed to find job: %s", err)
		return nil, err
	}
	if err = os.Remove(fmt.Sprintf("%s/%d", dataFolder, job.NextRunTime())); err != nil {
		logger.Errorf("Failed to delete job: %s", err)
		return nil, err
	}
	return job, nil
}

// Head returns the first scheduled job without removing it from the queue.
func (jq *jobQueue) Head() (quartz.ScheduledJob, error) {
	jq.mtx.Lock()
	defer jq.mtx.Unlock()
	logger.Trace("Head")
	job, err := findHead()
	if err != nil {
		logger.Errorf("Failed to find job: %s", err)
	}
	return job, err
}

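// findHead returns the scheduled job with the earliest next run time by scanning
// the file names in the data folder.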
func findHead() (quartz.ScheduledJob, error) {
	fileInfo, err := os.ReadDir(dataFolder)
	if err != nil {
		return nil, err
	}
	var earliest int64 = math.MaxInt64
	for _, file := range fileInfo {
		if !file.IsDir() {
			ts, err := strconv.ParseInt(file.Name(), 10, 64)
			if err == nil && ts < earliest {
				earliest = ts
			}
		}
	}
	if earliest == math.MaxInt64 {
		return nil, errors.New("no jobs found")
	}
	data, err := os.ReadFile(fmt.Sprintf("%s/%d", dataFolder, earliest))
	if err != nil {
		return nil, err
	}
	job, err := unmarshal(data)
	if err != nil {
		return nil, err
	}
	return job, nil
}

// Remove removes and returns the scheduled job with the specified key.
func (jq *jobQueue) Remove(jobKey *quartz.JobKey) (quartz.ScheduledJob, error) {
	jq.mtx.Lock()
	defer jq.mtx.Unlock()
	logger.Trace("Remove")
	fileInfo, err := os.ReadDir(dataFolder)
	if err != nil {
		return nil, err
	}
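	// the file names only encode run times, so locate the job by scanning and
	// unmarshaling each file until the key matches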
	for _, file := range fileInfo {
		if !file.IsDir() {
			path := fmt.Sprintf("%s/%s", dataFolder, file.Name())
			data, err := os.ReadFile(path)
			if err == nil {
				job, err := unmarshal(data)
				if err == nil {
					if jobKey.Equals(job.JobDetail().JobKey()) {
						if err = os.Remove(path); err == nil {
							return job, nil
						}
					}
				}
			}
		}
	}
	return nil, errors.New("no jobs found")
}

// ScheduledJobs returns the slice of all scheduled jobs in the queue.
func (jq *jobQueue) ScheduledJobs() []quartz.ScheduledJob {
	jq.mtx.Lock()
	defer jq.mtx.Unlock()
	logger.Trace("ScheduledJobs")
	var jobs []quartz.ScheduledJob
	fileInfo, err := os.ReadDir(dataFolder)
	if err != nil {
		return jobs
	}
	for _, file := range fileInfo {
		if !file.IsDir() {
			data, err := os.ReadFile(fmt.Sprintf("%s/%s", dataFolder, file.Name()))
			if err == nil {
				job, err := unmarshal(data)
				if err == nil {
					jobs = append(jobs, job)
				}
			}
		}
	}
	return jobs
}

// Size returns the number of jobs currently in the queue.
func (jq *jobQueue) Size() int {
	jq.mtx.Lock()
	defer jq.mtx.Unlock()
	logger.Trace("Size")
	files, _ := os.ReadDir(dataFolder)
	return len(files)
}

// Clear removes all jobs from the queue.
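// Note that this removes the data folder itself; main recreates it the next time
// the program starts.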
func (jq *jobQueue) Clear() error {
	jq.mtx.Lock()
	defer jq.mtx.Unlock()
	logger.Trace("Clear")
	return os.RemoveAll(dataFolder)
}