From 22459ee17a3ba0b4487f975b6ebe630cab2d9feb Mon Sep 17 00:00:00 2001 From: George Goldberg Date: Thu, 17 Aug 2017 15:05:17 +0100 Subject: PLT-7302: Aggregate Elasticsearch indexes over a certain age. (#7224) * PLT-7302: Aggregate Elasticsearch indexes over a certain age. This is done by a scheduled daily job, in order to keep the shard count to a sensible level in Elasticsearch. * Use map[string]string instead of StringMap --- jobs/jobs.go | 6 +++--- jobs/jobs_watcher.go | 24 +++++++++++++----------- jobs/schedulers.go | 23 ++++++++++++++++++++++- jobs/workers.go | 29 ++++++++++++++++++++++++----- 4 files changed, 62 insertions(+), 20 deletions(-) (limited to 'jobs') diff --git a/jobs/jobs.go b/jobs/jobs.go index 1986b22b6..e478c5a19 100644 --- a/jobs/jobs.go +++ b/jobs/jobs.go @@ -16,7 +16,7 @@ const ( CANCEL_WATCHER_POLLING_INTERVAL = 5000 ) -func CreateJob(jobType string, jobData map[string]interface{}) (*model.Job, *model.AppError) { +func CreateJob(jobType string, jobData map[string]string) (*model.Job, *model.AppError) { job := model.Job{ Id: model.NewId(), Type: jobType, @@ -70,9 +70,9 @@ func SetJobError(job *model.Job, jobError *model.AppError) *model.AppError { job.Status = model.JOB_STATUS_ERROR job.Progress = -1 if job.Data == nil { - job.Data = make(map[string]interface{}) + job.Data = make(map[string]string) } - job.Data["error"] = jobError + job.Data["error"] = jobError.Error() if result := <-Srv.Store.Job().UpdateOptimistically(job, model.JOB_STATUS_IN_PROGRESS); result.Err != nil { return result.Err diff --git a/jobs/jobs_watcher.go b/jobs/jobs_watcher.go index 9ba68e85e..83d4249eb 100644 --- a/jobs/jobs_watcher.go +++ b/jobs/jobs_watcher.go @@ -64,25 +64,27 @@ func (watcher *Watcher) PollAndNotify() { if result := <-Srv.Store.Job().GetAllByStatus(model.JOB_STATUS_PENDING); result.Err != nil { l4g.Error("Error occured getting all pending statuses: %v", result.Err.Error()) } else { - jobStatuses := result.Data.([]*model.Job) + jobs := result.Data.([]*model.Job) - for _, js := range jobStatuses { - j := model.Job{ - Type: js.Type, - Id: js.Id, - } - - if js.Type == model.JOB_TYPE_DATA_RETENTION { + for _, job := range jobs { + if job.Type == model.JOB_TYPE_DATA_RETENTION { if watcher.workers.DataRetention != nil { select { - case watcher.workers.DataRetention.JobChannel() <- j: + case watcher.workers.DataRetention.JobChannel() <- *job: default: } } - } else if js.Type == model.JOB_TYPE_ELASTICSEARCH_POST_INDEXING { + } else if job.Type == model.JOB_TYPE_ELASTICSEARCH_POST_INDEXING { if watcher.workers.ElasticsearchIndexing != nil { select { - case watcher.workers.ElasticsearchIndexing.JobChannel() <- j: + case watcher.workers.ElasticsearchIndexing.JobChannel() <- *job: + default: + } + } + } else if job.Type == model.JOB_TYPE_ELASTICSEARCH_POST_AGGREGATION { + if watcher.workers.ElasticsearchAggregation != nil { + select { + case watcher.workers.ElasticsearchAggregation.JobChannel() <- *job: default: } } diff --git a/jobs/schedulers.go b/jobs/schedulers.go index 73ec6661a..2f4e18001 100644 --- a/jobs/schedulers.go +++ b/jobs/schedulers.go @@ -16,7 +16,8 @@ import ( type Schedulers struct { startOnce sync.Once - DataRetention model.Scheduler + DataRetention model.Scheduler + ElasticsearchAggregation model.Scheduler listenerId string } @@ -28,6 +29,10 @@ func InitSchedulers() *Schedulers { schedulers.DataRetention = dataRetentionInterface.MakeScheduler() } + if elasticsearchAggregatorInterface := ejobs.GetElasticsearchAggregatorInterface(); elasticsearchAggregatorInterface 
!= nil { + schedulers.ElasticsearchAggregation = elasticsearchAggregatorInterface.MakeScheduler() + } + return schedulers } @@ -38,6 +43,10 @@ func (schedulers *Schedulers) Start() *Schedulers { if schedulers.DataRetention != nil && *utils.Cfg.DataRetentionSettings.Enable { go schedulers.DataRetention.Run() } + + if schedulers.ElasticsearchAggregation != nil && *utils.Cfg.ElasticsearchSettings.EnableIndexing { + go schedulers.ElasticsearchAggregation.Run() + } }) schedulers.listenerId = utils.AddConfigListener(schedulers.handleConfigChange) @@ -53,6 +62,14 @@ func (schedulers *Schedulers) handleConfigChange(oldConfig *model.Config, newCon schedulers.DataRetention.Stop() } } + + if schedulers.ElasticsearchAggregation != nil { + if !*oldConfig.ElasticsearchSettings.EnableIndexing && *newConfig.ElasticsearchSettings.EnableIndexing { + go schedulers.ElasticsearchAggregation.Run() + } else if *oldConfig.ElasticsearchSettings.EnableIndexing && !*newConfig.ElasticsearchSettings.EnableIndexing { + schedulers.ElasticsearchAggregation.Stop() + } + } } func (schedulers *Schedulers) Stop() *Schedulers { @@ -62,6 +79,10 @@ func (schedulers *Schedulers) Stop() *Schedulers { schedulers.DataRetention.Stop() } + if schedulers.ElasticsearchAggregation != nil && *utils.Cfg.ElasticsearchSettings.EnableIndexing { + schedulers.ElasticsearchAggregation.Stop() + } + l4g.Info("Stopped schedulers") return schedulers diff --git a/jobs/workers.go b/jobs/workers.go index 592c001fb..fe38641e7 100644 --- a/jobs/workers.go +++ b/jobs/workers.go @@ -16,16 +16,15 @@ type Workers struct { startOnce sync.Once watcher *Watcher - DataRetention model.Worker - ElasticsearchIndexing model.Worker + DataRetention model.Worker + ElasticsearchIndexing model.Worker + ElasticsearchAggregation model.Worker listenerId string } func InitWorkers() *Workers { - workers := &Workers{ - // SearchIndexing: MakeTestJob(s, "SearchIndexing"), - } + workers := &Workers{} workers.watcher = MakeWatcher(workers) if dataRetentionInterface := ejobs.GetDataRetentionInterface(); dataRetentionInterface != nil { @@ -36,6 +35,10 @@ func InitWorkers() *Workers { workers.ElasticsearchIndexing = elasticsearchIndexerInterface.MakeWorker() } + if elasticsearchAggregatorInterface := ejobs.GetElasticsearchAggregatorInterface(); elasticsearchAggregatorInterface != nil { + workers.ElasticsearchAggregation = elasticsearchAggregatorInterface.MakeWorker() + } + return workers } @@ -51,6 +54,10 @@ func (workers *Workers) Start() *Workers { go workers.ElasticsearchIndexing.Run() } + if workers.ElasticsearchAggregation != nil && *utils.Cfg.ElasticsearchSettings.EnableIndexing { + go workers.ElasticsearchAggregation.Run() + } + go workers.watcher.Start() }) @@ -75,6 +82,14 @@ func (workers *Workers) handleConfigChange(oldConfig *model.Config, newConfig *m workers.ElasticsearchIndexing.Stop() } } + + if workers.ElasticsearchAggregation != nil { + if !*oldConfig.ElasticsearchSettings.EnableIndexing && *newConfig.ElasticsearchSettings.EnableIndexing { + go workers.ElasticsearchAggregation.Run() + } else if *oldConfig.ElasticsearchSettings.EnableIndexing && !*newConfig.ElasticsearchSettings.EnableIndexing { + workers.ElasticsearchAggregation.Stop() + } + } } func (workers *Workers) Stop() *Workers { @@ -90,6 +105,10 @@ func (workers *Workers) Stop() *Workers { workers.ElasticsearchIndexing.Stop() } + if workers.ElasticsearchAggregation != nil && *utils.Cfg.ElasticsearchSettings.EnableIndexing { + workers.ElasticsearchAggregation.Stop() + } + l4g.Info("Stopped workers") return 
workers
--
cgit v1.2.3-1-g7c22
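Note: the enterprise-side aggregator (the code behind ejobs.GetElasticsearchAggregatorInterface()) is not part of this diff, so the sketch below is only a minimal, self-contained illustration of the Scheduler/Worker shape that jobs/schedulers.go and jobs/workers.go expect: something with Run() and Stop(), plus a JobChannel() that pending jobs can be pushed into. All type and function names here are placeholders, the job-type string is illustrative, and the daily cadence comes from the commit message rather than from code in this patch.

// Hypothetical sketch -- not the enterprise implementation wired up by this commit.
package main

import (
	"fmt"
	"time"
)

// Job stands in for model.Job; only the fields used here are shown.
// Data is map[string]string, matching the CreateJob signature change above.
type Job struct {
	Id   string
	Type string
	Data map[string]string
}

// aggregationScheduler enqueues one aggregation job per day.
type aggregationScheduler struct {
	jobs chan Job
	stop chan struct{}
}

func (s *aggregationScheduler) Run() {
	ticker := time.NewTicker(24 * time.Hour) // "scheduled daily job" per the commit message
	defer ticker.Stop()
	for {
		select {
		case <-ticker.C:
			// Hand a new job to whatever is draining the channel (the worker).
			s.jobs <- Job{Type: "elasticsearch_post_aggregation", Data: map[string]string{}}
		case <-s.stop:
			return
		}
	}
}

func (s *aggregationScheduler) Stop() { close(s.stop) }

// aggregationWorker drains the channel and performs the index aggregation.
type aggregationWorker struct {
	jobs chan Job
	stop chan struct{}
}

func (w *aggregationWorker) JobChannel() chan Job { return w.jobs }

func (w *aggregationWorker) Run() {
	for {
		select {
		case job := <-w.jobs:
			// Placeholder for the real work: merge per-day indexes older than
			// the configured age into larger aggregate indexes.
			fmt.Printf("aggregating indexes for job %q of type %s\n", job.Id, job.Type)
		case <-w.stop:
			return
		}
	}
}

func (w *aggregationWorker) Stop() { close(w.stop) }

func main() {
	jobs := make(chan Job)
	scheduler := &aggregationScheduler{jobs: jobs, stop: make(chan struct{})}
	worker := &aggregationWorker{jobs: jobs, stop: make(chan struct{})}
	go scheduler.Run()
	go worker.Run()
	time.Sleep(time.Second) // in the server these run until the config listener calls Stop()
	scheduler.Stop()
	worker.Stop()
}

In the actual wiring above, the watcher in jobs/jobs_watcher.go plays the hand-off role for pending jobs: it polls the job store and pushes any JOB_TYPE_ELASTICSEARCH_POST_AGGREGATION job into the worker's JobChannel(), using a non-blocking select (with a default case) so a busy worker never stalls the poll loop.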